Diffstat (limited to 'drivers/clk/clk.c'):
 drivers/clk/clk.c | 2447 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 1868 insertions(+), 579 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 75d13c0eff12..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
#include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
#include "clk.h"
@@ -33,21 +36,43 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
+static const struct hlist_head *all_lists[] = {
+ &clk_root_list,
+ &clk_orphan_list,
+ NULL,
+};
+
/*** private data structures ***/
+struct clk_parent_map {
+ const struct clk_hw *hw;
+ struct clk_core *core;
+ const char *fw_name;
+ const char *name;
+ int index;
+};
+
struct clk_core {
const char *name;
const struct clk_ops *ops;
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
+ struct device_node *of_node;
struct clk_core *parent;
- const char **parent_names;
- struct clk_core **parents;
+ struct clk_parent_map *parents;
u8 num_parents;
u8 new_parent_index;
unsigned long rate;
@@ -57,6 +82,7 @@ struct clk_core {
struct clk_core *new_child;
unsigned long flags;
bool orphan;
+ bool rpm_enabled;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
@@ -67,6 +93,7 @@ struct clk_core {
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node hashtable_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
@@ -81,6 +108,7 @@ struct clk_core {
struct clk {
struct clk_core *core;
+ struct device *dev;
const char *dev_id;
const char *con_id;
unsigned long min_rate;
@@ -92,23 +120,103 @@ struct clk {
/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
- int ret = 0;
-
- if (!core->dev)
+ if (!core->rpm_enabled)
return 0;
- ret = pm_runtime_get_sync(core->dev);
- return ret < 0 ? ret : 0;
+ return pm_runtime_resume_and_get(core->dev);
}
static void clk_pm_runtime_put(struct clk_core *core)
{
- if (!core->dev)
+ if (!core->rpm_enabled)
return;
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
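A minimal sketch of the calling pattern this pair enables, mirroring what clk_disable_unused() does further down: take all the runtime PM references before grabbing the prepare lock, and drop them only after releasing it. The tree walk itself is elided here.

	static int example_walk_all_clks(void)
	{
		int ret;

		/* Resume all clk provider devices, without prepare_lock held */
		ret = clk_pm_runtime_get_all();
		if (ret)
			return ret;

		clk_prepare_lock();
		/* ... walk the clk tree safely here ... */
		clk_prepare_unlock();

		/* Drops the PM references and clk_rpm_list_lock */
		clk_pm_runtime_put_all();

		return 0;
	}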
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -223,7 +331,7 @@ static bool clk_core_is_enabled(struct clk_core *core)
* taking enable spinlock, but the below check is needed if one tries
* to call it from other places.
*/
- if (core->dev) {
+ if (core->rpm_enabled) {
pm_runtime_get_noresume(core->dev);
if (!pm_runtime_active(core->dev)) {
ret = false;
@@ -231,9 +339,20 @@ static bool clk_core_is_enabled(struct clk_core *core)
}
}
+ /*
+ * This could be called with the enable lock held, or from atomic
+ * context. If the parent isn't enabled already, we can't do
+ * anything here. We can also assume this clock isn't enabled.
+ */
+ if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
+ if (!clk_core_is_enabled(core->parent)) {
+ ret = false;
+ goto done;
+ }
+
ret = core->ops->is_enabled(core->hw);
done:
- if (core->dev)
+ if (core->rpm_enabled)
pm_runtime_put(core->dev);
return ret;
@@ -253,6 +372,18 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
+struct device *clk_hw_get_dev(const struct clk_hw *hw)
+{
+ return hw->core->dev;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_dev);
+
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
+{
+ return hw->core->of_node;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
+
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
@@ -271,60 +402,146 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
-static struct clk_core *__clk_lookup_subtree(const char *name,
- struct clk_core *core)
+static struct clk_core *clk_core_lookup(const char *name)
{
- struct clk_core *child;
- struct clk_core *ret;
+ struct clk_core *core;
+ u32 hash;
- if (!strcmp(core->name, name))
- return core;
+ if (!name)
+ return NULL;
- hlist_for_each_entry(child, &core->children, child_node) {
- ret = __clk_lookup_subtree(name, child);
- if (ret)
- return ret;
- }
+ hash = full_name_hash(NULL, name, strlen(name));
+
+ /* search the hashtable */
+ hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+ if (!strcmp(core->name, name))
+ return core;
return NULL;
}
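The lookup above assumes every clk_core was hashed by name when it was registered. The registration-side hunk is not shown in this excerpt; a sketch of what it presumably looks like, keyed with the same full_name_hash():

	/* Assumed counterpart in the registration path (hunk not shown) */
	static void clk_core_hash_add(struct clk_core *core)
	{
		u32 hash = full_name_hash(NULL, core->name, strlen(core->name));

		hash_add(clk_hashtable, &core->hashtable_node, hash);
	}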
-static struct clk_core *clk_core_lookup(const char *name)
+#ifdef CONFIG_OF
+static int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name, struct of_phandle_args *out_args);
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
+#else
+static inline int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name,
+ struct of_phandle_args *out_args)
+{
+ return -ENOENT;
+}
+static inline struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
- struct clk_core *root_clk;
- struct clk_core *ret;
+ return ERR_PTR(-ENOENT);
+}
+#endif
- if (!name)
- return NULL;
+/**
+ * clk_core_get - Find the clk_core parent of a clk
+ * @core: clk to find parent of
+ * @p_index: parent index to search for
+ *
+ * This is the preferred method for clk providers to find the parent of a
+ * clk when that parent is external to the clk controller. The parent_names
+ * array is indexed and treated as a local name matching a string in the device
+ * node's 'clock-names' property or as the 'con_id' matching the device's
+ * dev_name() in a clk_lookup. This allows clk providers to use their own
+ * namespace instead of looking for a globally unique parent string.
+ *
+ * For example the following DT snippet would allow a clock registered by the
+ * clock-controller@c001 that has a clk_init_data::parent_data array
+ * with 'xtal' in the 'name' member to find the clock provided by the
+ * clock-controller@f00abcd without needing to get the globally unique name of
+ * the xtal clk.
+ *
+ * parent: clock-controller@f00abcd {
+ * reg = <0xf00abcd 0xabcd>;
+ * #clock-cells = <0>;
+ * };
+ *
+ * clock-controller@c001 {
+ * reg = <0xc001 0xf00d>;
+ * clocks = <&parent>;
+ * clock-names = "xtal";
+ * #clock-cells = <1>;
+ * };
+ *
+ * Returns: -ENOENT when the provider can't be found or the clk doesn't
+ * exist in the provider or the name can't be found in the DT node or
+ * in a clkdev lookup. NULL when the provider knows about the clk but it
+ * isn't provided on this system.
+ * A valid clk_core pointer when the clk can be found in the provider.
+ */
+static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+{
+ const char *name = core->parents[p_index].fw_name;
+ int index = core->parents[p_index].index;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
+ struct device *dev = core->dev;
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct device_node *np = core->of_node;
+ struct of_phandle_args clkspec;
- /* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
+ if (np && (name || index >= 0) &&
+ !of_parse_clkspec(np, index, name, &clkspec)) {
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
+ of_node_put(clkspec.np);
+ } else if (name) {
+ /*
+ * If the DT search above couldn't find the provider fallback to
+ * looking up via clkdev based clk_lookups.
+ */
+ hw = clk_find_hw(dev_id, name);
}
- /* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+
+ if (!hw)
+ return NULL;
+
+ return hw->core;
+}
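Tying this back to the DT snippet in the kernel-doc above, the provider for clock-controller@c001 could describe its external parent locally like this (a sketch; any registration path that accepts clk_init_data::parent_data works the same way):

	/* Local parent description matching the "xtal" clock-names entry */
	static const struct clk_parent_data example_parents[] = {
		{ .fw_name = "xtal", .index = 0 },
	};

clk_core_get() then resolves "xtal" through the node's clock-names property, falling back to a clkdev lookup keyed on the device name.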
+
+static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
+{
+ struct clk_parent_map *entry = &core->parents[index];
+ struct clk_core *parent;
+
+ if (entry->hw) {
+ parent = entry->hw->core;
+ } else {
+ parent = clk_core_get(core, index);
+ if (PTR_ERR(parent) == -ENOENT && entry->name)
+ parent = clk_core_lookup(entry->name);
}
- return NULL;
+ /*
+ * We have a direct reference but it isn't registered yet?
+ * Orphan it and let clk_reparent() update the orphan status
+ * when the parent is registered.
+ */
+ if (!parent)
+ parent = ERR_PTR(-EPROBE_DEFER);
+
+ /* Only cache it if it's not an error */
+ if (!IS_ERR(parent))
+ entry->core = parent;
}
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
u8 index)
{
- if (!core || index >= core->num_parents)
+ if (!core || index >= core->num_parents || !core->parents)
return NULL;
- if (!core->parents[index])
- core->parents[index] =
- clk_core_lookup(core->parent_names[index]);
+ if (!core->parents[index].core)
+ clk_core_fill_parent_index(core, index);
- return core->parents[index];
+ return core->parents[index].core;
}
struct clk_hw *
@@ -345,23 +562,18 @@ unsigned int __clk_get_enable_count(struct clk *clk)
static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
- unsigned long ret;
-
- if (!core) {
- ret = 0;
- goto out;
- }
-
- ret = core->rate;
-
- if (!core->num_parents)
- goto out;
+ if (!core)
+ return 0;
- if (!core->parent)
- ret = 0;
+ if (!core->num_parents || core->parent)
+ return core->rate;
-out:
- return ret;
+ /*
+ * Clk must have a parent because num_parents > 0 but the parent isn't
+ * known yet. Best to return 0 as the rate of this clk until we can
+ * properly recalc the rate based on the parent's rate.
+ */
+ return 0;
}
unsigned long clk_hw_get_rate(const struct clk_hw *hw)
@@ -370,7 +582,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);
-static unsigned long __clk_get_accuracy(struct clk_core *core)
+static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
if (!core)
return 0;
@@ -378,12 +590,6 @@ static unsigned long __clk_get_accuracy(struct clk_core *core)
return core->accuracy;
}
-unsigned long __clk_get_flags(struct clk *clk)
-{
- return !clk ? 0 : clk->core->flags;
-}
-EXPORT_SYMBOL_GPL(__clk_get_flags);
-
unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
return hw->core->flags;
@@ -394,16 +600,13 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
{
return clk_core_is_prepared(hw->core);
}
-
-bool clk_hw_rate_is_protected(const struct clk_hw *hw)
-{
- return clk_core_rate_is_protected(hw->core);
-}
+EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
}
+EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
bool __clk_is_enabled(struct clk *clk)
{
@@ -423,6 +626,94 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
return now <= rate && now > best;
}
+static void clk_core_init_rate_req(struct clk_core * const core,
+ struct clk_rate_request *req,
+ unsigned long rate);
+
+static int clk_core_round_rate_nolock(struct clk_core *core,
+ struct clk_rate_request *req);
+
+static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
+{
+ struct clk_core *tmp;
+ unsigned int i;
+
+ /* Optimize for the case where the parent is already the parent. */
+ if (core->parent == parent)
+ return true;
+
+ for (i = 0; i < core->num_parents; i++) {
+ tmp = clk_core_get_parent_by_index(core, i);
+ if (!tmp)
+ continue;
+
+ if (tmp == parent)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+clk_core_forward_rate_req(struct clk_core *core,
+ const struct clk_rate_request *old_req,
+ struct clk_core *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!clk_core_has_parent(core, parent)))
+ return;
+
+ clk_core_init_rate_req(parent, req, parent_rate);
+
+ if (req->min_rate < old_req->min_rate)
+ req->min_rate = old_req->min_rate;
+
+ if (req->max_rate > old_req->max_rate)
+ req->max_rate = old_req->max_rate;
+}
+
+static int
+clk_core_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_core *core = hw->core;
+ struct clk_core *parent = core->parent;
+ unsigned long best;
+ int ret;
+
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ struct clk_rate_request parent_req;
+
+ if (!parent) {
+ req->rate = 0;
+ return 0;
+ }
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req,
+ req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
+ if (ret)
+ return ret;
+
+ trace_clk_rate_request_done(&parent_req);
+
+ best = parent_req.rate;
+ } else if (parent) {
+ best = clk_core_get_rate_nolock(parent);
+ } else {
+ best = clk_core_get_rate_nolock(core);
+ }
+
+ req->best_parent_rate = best;
+ req->rate = best;
+
+ return 0;
+}
+
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags)
@@ -430,56 +721,49 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
int i, num_parents, ret;
unsigned long best = 0;
- struct clk_rate_request parent_req = *req;
/* if NO_REPARENT flag set, pass through to current parent */
- if (core->flags & CLK_SET_RATE_NO_REPARENT) {
- parent = core->parent;
- if (core->flags & CLK_SET_RATE_PARENT) {
- ret = __clk_determine_rate(parent ? parent->hw : NULL,
- &parent_req);
- if (ret)
- return ret;
-
- best = parent_req.rate;
- } else if (parent) {
- best = clk_core_get_rate_nolock(parent);
- } else {
- best = clk_core_get_rate_nolock(core);
- }
-
- goto out;
- }
+ if (core->flags & CLK_SET_RATE_NO_REPARENT)
+ return clk_core_determine_rate_no_reparent(hw, req);
/* find the parent that can provide the fastest rate <= rate */
num_parents = core->num_parents;
for (i = 0; i < num_parents; i++) {
+ unsigned long parent_rate;
+
parent = clk_core_get_parent_by_index(core, i);
if (!parent)
continue;
if (core->flags & CLK_SET_RATE_PARENT) {
- parent_req = *req;
- ret = __clk_determine_rate(parent->hw, &parent_req);
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
+
+ trace_clk_rate_request_done(&parent_req);
+
+ parent_rate = parent_req.rate;
} else {
- parent_req.rate = clk_core_get_rate_nolock(parent);
+ parent_rate = clk_core_get_rate_nolock(parent);
}
- if (mux_is_better_rate(req->rate, parent_req.rate,
+ if (mux_is_better_rate(req->rate, parent_rate,
best, flags)) {
best_parent = parent;
- best = parent_req.rate;
+ best = parent_rate;
}
}
if (!best_parent)
return -EINVAL;
-out:
- if (best_parent)
- req->best_parent_hw = best_parent->hw;
+ req->best_parent_hw = best_parent->hw;
req->best_parent_rate = best;
req->rate = best;
@@ -500,6 +784,8 @@ static void clk_core_get_boundaries(struct clk_core *core,
{
struct clk *clk_user;
+ lockdep_assert_held(&prepare_lock);
+
*min_rate = core->min_rate;
*max_rate = core->max_rate;
@@ -510,6 +796,40 @@ static void clk_core_get_boundaries(struct clk_core *core,
*max_rate = min(*max_rate, clk_user->max_rate);
}
+/*
+ * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
+ * @hw: the hw clk we want to get the range from
+ * @min_rate: pointer to the variable that will hold the minimum
+ * @max_rate: pointer to the variable that will hold the maximum
+ *
+ * Fills the @min_rate and @max_rate variables with the minimum and
+ * maximum that clock can reach.
+ */
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate)
+{
+ clk_core_get_boundaries(hw->core, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
+
+static bool clk_core_check_boundaries(struct clk_core *core,
+ unsigned long min_rate,
+ unsigned long max_rate)
+{
+ struct clk *user;
+
+ lockdep_assert_held(&prepare_lock);
+
+ if (min_rate > core->max_rate || max_rate < core->min_rate)
+ return false;
+
+ hlist_for_each_entry(user, &core->clks, clks_node)
+ if (min_rate > user->max_rate || max_rate < user->min_rate)
+ return false;
+
+ return true;
+}
+
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate)
{
@@ -519,9 +839,15 @@ void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
/*
+ * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
+ * @hw: mux type clk to determine rate on
+ * @req: rate request, also used to return preferred parent and frequencies
+ *
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
+ *
+ * Returns: 0 on success, a negative errno on error
*/
int __clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
@@ -537,6 +863,25 @@ int __clk_mux_determine_rate_closest(struct clk_hw *hw,
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
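As noted, the helper can be plugged in directly as a determine_rate callback. A sketch for a simple mux, where my_mux_get_parent() and my_mux_set_parent() stand in for real register accessors:

	static u8 my_mux_get_parent(struct clk_hw *hw)
	{
		return 0;	/* read the mux select field here */
	}

	static int my_mux_set_parent(struct clk_hw *hw, u8 index)
	{
		return 0;	/* program the mux select field here */
	}

	static const struct clk_ops my_mux_ops = {
		.get_parent	= my_mux_get_parent,
		.set_parent	= my_mux_set_parent,
		.determine_rate	= __clk_mux_determine_rate,
	};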
+/*
+ * clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
+ * @hw: mux type clk to determine rate on
+ * @req: rate request, also used to return preferred frequency
+ *
+ * Helper for finding best parent rate to provide a given frequency.
+ * This can be used directly as a determine_rate callback (e.g. for a
+ * mux), or from a more complex clock that may combine a mux with other
+ * operations.
+ *
+ * Returns: 0 on success, a negative errno on error
+ */
+int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_core_determine_rate_no_reparent(hw, req);
+}
+EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);
+
/*** clk api ***/
static void clk_core_rate_unprotect(struct clk_core *core)
@@ -645,7 +990,7 @@ static void clk_core_rate_restore_protect(struct clk_core *core, int count)
* clk_rate_exclusive_get - get exclusivity over the clk rate control
* @clk: the clk over which the exclusity of rate control is requested
*
- * clk_rate_exlusive_get() begins a critical section during which a clock
+ * clk_rate_exclusive_get() begins a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
@@ -673,6 +1018,25 @@ int clk_rate_exclusive_get(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
+static void devm_clk_rate_exclusive_put(void *data)
+{
+ struct clk *clk = data;
+
+ clk_rate_exclusive_put(clk);
+}
+
+int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_rate_exclusive_get(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_clk_rate_exclusive_put, clk);
+}
+EXPORT_SYMBOL_GPL(devm_clk_rate_exclusive_get);
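Typical consumer usage is in a probe path, where the managed variant saves an explicit clk_rate_exclusive_put() in every error branch and on unbind. A sketch (the 100 MHz rate is illustrative):

	static int example_probe(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* Exclusivity is dropped automatically when dev is unbound */
		ret = devm_clk_rate_exclusive_get(dev, clk);
		if (ret)
			return ret;

		return clk_set_rate(clk, 100000000);
	}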
+
static void clk_core_unprepare(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
@@ -701,10 +1065,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
@@ -832,12 +1195,12 @@ static void clk_core_disable(struct clk_core *core)
if (--core->enable_count > 0)
return;
- trace_clk_disable_rcuidle(core);
+ trace_clk_disable(core);
if (core->ops->disable)
core->ops->disable(core->hw);
- trace_clk_disable_complete_rcuidle(core);
+ trace_clk_disable_complete(core);
clk_core_disable(core->parent);
}
@@ -891,12 +1254,12 @@ static int clk_core_enable(struct clk_core *core)
if (ret)
return ret;
- trace_clk_enable_rcuidle(core);
+ trace_clk_enable(core);
if (core->ops->enable)
ret = core->ops->enable(core->hw);
- trace_clk_enable_complete_rcuidle(core);
+ trace_clk_enable_complete(core);
if (ret) {
clk_core_disable(core->parent);
@@ -1037,6 +1400,27 @@ int clk_enable(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_enable);
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and counterparts for usage counts
+ * to be right.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return clk && !(clk->core->ops->enable && clk->core->ops->disable);
+}
+EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
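A sketch of the power-management use case described above: gate the clock as deeply as the hardware model allows, assuming the caller rebalances with the matching prepare/enable calls on resume.

	static void example_suspend_clk(struct clk *clk)
	{
		clk_disable(clk);

		/*
		 * If prepare implied enable, clk_disable() alone had no
		 * material effect; unprepare as well (sleepable context).
		 */
		if (clk_is_enabled_when_prepared(clk))
			clk_unprepare(clk);
	}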
+
static int clk_core_prepare_enable(struct clk_core *core)
{
int ret;
@@ -1058,7 +1442,7 @@ static void clk_core_disable_unprepare(struct clk_core *core)
clk_core_unprepare_lock(core);
}
-static void clk_unprepare_unused_subtree(struct clk_core *core)
+static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
@@ -1073,9 +1457,6 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1084,11 +1465,9 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
-static void clk_disable_unused_subtree(struct clk_core *core)
+static void __init clk_disable_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
unsigned long flags;
@@ -1101,9 +1480,6 @@ static void clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1128,13 +1504,11 @@ static void clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
-static bool clk_ignore_unused;
+static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
clk_ignore_unused = true;
@@ -1142,15 +1516,25 @@ static int __init clk_ignore_unused_setup(char *__unused)
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
-static int clk_disable_unused(void)
+static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
+ pr_info("clk: Disabling unused clocks\n");
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1167,6 +1551,8 @@ static int clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -1182,7 +1568,21 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
return 0;
/*
- * At this point, core protection will be disabled if
+ * Some clock providers hand-craft their clk_rate_requests and
+ * might not fill min_rate and max_rate.
+ *
+ * If it's the case, clamping the rate is equivalent to setting
+ * the rate to 0 which is bad. Skip the clamping but complain so
+ * that it gets fixed, hopefully.
+ */
+ if (!req->min_rate && !req->max_rate)
+ pr_warn("%s: %s: clk_rate_request has uninitialized min or max rate.\n",
+ __func__, core->name);
+ else
+ req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+
+ /*
+ * At this point, core protection will be disabled
* - if the provider is not protected at all
* - if the calling consumer is the only one which has exclusivity
* over the provider
@@ -1206,13 +1606,24 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
}
static void clk_core_init_rate_req(struct clk_core * const core,
- struct clk_rate_request *req)
+ struct clk_rate_request *req,
+ unsigned long rate)
{
struct clk_core *parent;
- if (WARN_ON(!core || !req))
+ if (WARN_ON(!req))
return;
+ memset(req, 0, sizeof(*req));
+ req->max_rate = ULONG_MAX;
+
+ if (!core)
+ return;
+
+ req->core = core;
+ req->rate = rate;
+ clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
+
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
@@ -1223,17 +1634,62 @@ static void clk_core_init_rate_req(struct clk_core * const core,
}
}
-static bool clk_core_can_round(struct clk_core * const core)
+/**
+ * clk_hw_init_rate_request - Initializes a clk_rate_request
+ * @hw: the clk for which we want to submit a rate request
+ * @req: the clk_rate_request structure we want to initialise
+ * @rate: the rate which is to be requested
+ *
+ * Initializes a clk_rate_request structure to submit to
+ * __clk_determine_rate() or similar functions.
+ */
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate)
{
- if (core->ops->determine_rate || core->ops->round_rate)
- return true;
+ if (WARN_ON(!hw || !req))
+ return;
- return false;
+ clk_core_init_rate_req(hw->core, req, rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
+
+/**
+ * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
+ * @hw: the original clock that got the rate request
+ * @old_req: the original clk_rate_request structure we want to forward
+ * @parent: the clk we want to forward @old_req to
+ * @req: the clk_rate_request structure we want to initialise
+ * @parent_rate: The rate which is to be requested to @parent
+ *
+ * Initializes a clk_rate_request structure to submit to a clock parent
+ * in __clk_determine_rate() or similar functions.
+ */
+void clk_hw_forward_rate_request(const struct clk_hw *hw,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!hw || !old_req || !parent || !req))
+ return;
+
+ clk_core_forward_rate_req(hw->core, old_req,
+ parent->core, req,
+ parent_rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
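A sketch of how a provider's .determine_rate implementation might use these helpers to delegate a request upstream; the local divider math is elided:

	static int my_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
	{
		struct clk_hw *parent = clk_hw_get_parent(hw);
		struct clk_rate_request parent_req;
		int ret;

		if (!parent)
			return -EINVAL;

		/* Forward the request, keeping the tighter boundaries */
		clk_hw_forward_rate_request(hw, req, parent, &parent_req,
					    req->rate);
		ret = __clk_determine_rate(parent, &parent_req);
		if (ret)
			return ret;

		req->best_parent_rate = parent_req.rate;
		req->rate = parent_req.rate;	/* apply local dividers here */

		return 0;
	}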
+
+static bool clk_core_can_round(struct clk_core * const core)
+{
+ return core->ops->determine_rate || core->ops->round_rate;
}
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
+ int ret;
+
lockdep_assert_held(&prepare_lock);
if (!core) {
@@ -1241,12 +1697,27 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
return 0;
}
- clk_core_init_rate_req(core, req);
-
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
- else if (core->flags & CLK_SET_RATE_PARENT)
- return clk_core_round_rate_nolock(core->parent, req);
+
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
+ ret = clk_core_round_rate_nolock(core->parent, &parent_req);
+ if (ret)
+ return ret;
+
+ trace_clk_rate_request_done(&parent_req);
+
+ req->best_parent_rate = parent_req.rate;
+ req->rate = parent_req.rate;
+
+ return 0;
+ }
req->rate = core->rate;
return 0;
@@ -1270,18 +1741,36 @@ int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);
+/**
+ * clk_hw_round_rate() - round the given rate for a hw clk
+ * @hw: the hw clk for which we are rounding a rate
+ * @rate: the rate which is to be rounded
+ *
+ * Takes in a rate as input and rounds it to a rate that the clk can actually
+ * use.
+ *
+ * Context: prepare_lock must be held.
+ * For clk providers to call from within clk_ops such as .round_rate,
+ * .determine_rate.
+ *
+ * Return: the rounded rate of the hw clk if it supports the round_rate
+ * operation, else the parent rate.
+ */
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
int ret;
struct clk_rate_request req;
- clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(hw->core, &req, rate);
+
+ trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
return 0;
+ trace_clk_rate_request_done(&req);
+
return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);
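For example, a provider can probe whether a target rate is exactly achievable before committing to it (a sketch; per the Context note, only valid with the prepare_lock held, i.e. from within clk_ops):

	static bool my_rate_is_exact(struct clk_hw *hw, unsigned long target)
	{
		return clk_hw_round_rate(hw, target) == target;
	}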
@@ -1308,11 +1797,14 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
- clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(clk->core, &req, rate);
+
+ trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(clk->core, &req);
+ trace_clk_rate_request_done(&req);
+
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
@@ -1391,18 +1883,12 @@ static void __clk_recalc_accuracies(struct clk_core *core)
__clk_recalc_accuracies(child);
}
-static long clk_core_get_accuracy(struct clk_core *core)
+static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
- unsigned long accuracy;
-
- clk_prepare_lock();
if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
__clk_recalc_accuracies(core);
- accuracy = __clk_get_accuracy(core);
- clk_prepare_unlock();
-
- return accuracy;
+ return clk_core_get_accuracy_no_lock(core);
}
/**
@@ -1416,10 +1902,16 @@ static long clk_core_get_accuracy(struct clk_core *core)
*/
long clk_get_accuracy(struct clk *clk)
{
+ long accuracy;
+
if (!clk)
return 0;
- return clk_core_get_accuracy(clk->core);
+ clk_prepare_lock();
+ accuracy = clk_core_get_accuracy_recalc(clk->core);
+ clk_prepare_unlock();
+
+ return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);
@@ -1438,6 +1930,7 @@ static unsigned long clk_recalc(struct clk_core *core,
/**
* __clk_recalc_rates
* @core: first clk in the subtree
+ * @update_req: Whether req_rate should be updated with the new rate
* @msg: notification type (see include/linux/clk.h)
*
* Walks the subtree of clks starting with clk and recalculates rates as it
@@ -1447,7 +1940,8 @@ static unsigned long clk_recalc(struct clk_core *core,
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
*/
-static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *core, bool update_req,
+ unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
@@ -1461,6 +1955,8 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
parent_rate = core->parent->rate;
core->rate = clk_recalc(core, parent_rate);
+ if (update_req)
+ core->req_rate = core->rate;
/*
* ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
@@ -1470,22 +1966,15 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
__clk_notify(core, msg, old_rate, core->rate);
hlist_for_each_entry(child, &core->children, child_node)
- __clk_recalc_rates(child, msg);
+ __clk_recalc_rates(child, update_req, msg);
}
-static unsigned long clk_core_get_rate(struct clk_core *core)
+static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
- unsigned long rate;
-
- clk_prepare_lock();
-
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
- __clk_recalc_rates(core, 0);
-
- rate = clk_core_get_rate_nolock(core);
- clk_prepare_unlock();
+ __clk_recalc_rates(core, false, 0);
- return rate;
+ return clk_core_get_rate_nolock(core);
}
/**
@@ -1493,15 +1982,22 @@ static unsigned long clk_core_get_rate(struct clk_core *core)
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
+ * is set, which means a recalc_rate will be issued. Can be called regardless of
+ * whether the clock is enabled. If clk is NULL, or if an error occurred, then
+ * returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
+ unsigned long rate;
+
if (!clk)
return 0;
- return clk_core_get_rate(clk->core);
+ clk_prepare_lock();
+ rate = clk_core_get_rate_recalc(clk->core);
+ clk_prepare_unlock();
+
+ return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
@@ -1513,13 +2009,59 @@ static int clk_fetch_parent_index(struct clk_core *core,
if (!parent)
return -EINVAL;
- for (i = 0; i < core->num_parents; i++)
- if (clk_core_get_parent_by_index(core, i) == parent)
+ for (i = 0; i < core->num_parents; i++) {
+ /* Found it first try! */
+ if (core->parents[i].core == parent)
return i;
- return -EINVAL;
+ /* Something else is here, so keep looking */
+ if (core->parents[i].core)
+ continue;
+
+ /* Maybe core hasn't been cached but the hw is all we know? */
+ if (core->parents[i].hw) {
+ if (core->parents[i].hw == parent->hw)
+ break;
+
+ /* Didn't match, but we're expecting a clk_hw */
+ continue;
+ }
+
+ /* Maybe it hasn't been cached (clk_set_parent() path) */
+ if (parent == clk_core_get(core, i))
+ break;
+
+ /* Fallback to comparing globally unique names */
+ if (core->parents[i].name &&
+ !strcmp(parent->name, core->parents[i].name))
+ break;
+ }
+
+ if (i == core->num_parents)
+ return -EINVAL;
+
+ core->parents[i].core = parent;
+ return i;
}
+/**
+ * clk_hw_get_parent_index - return the index of the parent clock
+ * @hw: clk_hw associated with the clk being consumed
+ *
+ * Fetches and returns the index of the parent clock. Returns -EINVAL if the given
+ * clock does not have a current parent.
+ */
+int clk_hw_get_parent_index(struct clk_hw *hw)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+
+ if (WARN_ON(parent == NULL))
+ return -EINVAL;
+
+ return clk_fetch_parent_index(hw->core, parent->core);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
+
/*
* Update the orphan status of @core and all its children.
*/
@@ -1646,6 +2188,7 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
flags = clk_enable_lock();
clk_reparent(core, old_parent);
clk_enable_unlock(flags);
+
__clk_set_parent_after(core, old_parent, parent);
return ret;
@@ -1734,7 +2277,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
- long ret;
+ int ret;
/* sanity */
if (IS_ERR_OR_NULL(core))
@@ -1751,16 +2294,16 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
if (clk_core_can_round(core)) {
struct clk_rate_request req;
- req.rate = rate;
- req.min_rate = min_rate;
- req.max_rate = max_rate;
+ clk_core_init_rate_req(core, &req, rate);
- clk_core_init_rate_req(core, &req);
+ trace_clk_rate_request_start(&req);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
return NULL;
+ trace_clk_rate_request_done(&req);
+
best_parent_rate = req.best_parent_rate;
new_rate = req.rate;
parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
@@ -1873,12 +2416,8 @@ static void clk_change_rate(struct clk_core *core)
return;
if (core->flags & CLK_SET_RATE_UNGATE) {
- unsigned long flags;
-
clk_core_prepare(core);
- flags = clk_enable_lock();
- clk_core_enable(core);
- clk_enable_unlock(flags);
+ clk_core_enable_lock(core);
}
if (core->new_parent && core->new_parent != core->parent) {
@@ -1911,11 +2450,7 @@ static void clk_change_rate(struct clk_core *core)
core->rate = clk_recalc(core, best_parent_rate);
if (core->flags & CLK_SET_RATE_UNGATE) {
- unsigned long flags;
-
- flags = clk_enable_lock();
- clk_core_disable(core);
- clk_enable_unlock(flags);
+ clk_core_disable_lock(core);
clk_core_unprepare(core);
}
@@ -1962,11 +2497,14 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
if (cnt < 0)
return cnt;
- clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
- req.rate = req_rate;
+ clk_core_init_rate_req(core, &req, req_rate);
+
+ trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(core, &req);
+ trace_clk_rate_request_done(&req);
+
/* restore the protection */
clk_core_rate_restore_protect(core, cnt);
@@ -1978,7 +2516,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
{
struct clk_core *top, *fail_clk;
unsigned long rate;
- int ret = 0;
+ int ret;
if (!core)
return 0;
@@ -2068,7 +2606,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
EXPORT_SYMBOL_GPL(clk_set_rate);
/**
- * clk_set_rate_exclusive - specify a new rate get exclusive control
+ * clk_set_rate_exclusive - specify a new rate and get exclusive control
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
@@ -2076,7 +2614,7 @@ EXPORT_SYMBOL_GPL(clk_set_rate);
* within a critical section
*
* This can be used initially to ensure that at least 1 consumer is
- * statisfied when several consumers are competing for exclusivity over the
+ * satisfied when several consumers are competing for exclusivity over the
* same clock provider.
*
* The exclusivity is not applied if setting the rate failed.
@@ -2114,22 +2652,20 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+static int clk_set_rate_range_nolock(struct clk *clk,
+ unsigned long min,
+ unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
+ lockdep_assert_held(&prepare_lock);
+
if (!clk)
return 0;
+ trace_clk_set_rate_range(clk->core, min, max);
+
if (min > max) {
pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
__func__, clk->core->name, clk->dev_id, clk->con_id,
@@ -2137,8 +2673,6 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
return -EINVAL;
}
- clk_prepare_lock();
-
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
@@ -2148,37 +2682,66 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
clk->min_rate = min;
clk->max_rate = max;
- rate = clk_core_get_rate_nolock(clk->core);
- if (rate < min || rate > max) {
- /*
- * FIXME:
- * We are in bit of trouble here, current rate is outside the
- * the requested range. We are going try to request appropriate
- * range boundary but there is a catch. It may fail for the
- * usual reason (clock broken, clock protected, etc) but also
- * because:
- * - round_rate() was not favorable and fell on the wrong
- * side of the boundary
- * - the determine_rate() callback does not really check for
- * this corner case when determining the rate
- */
+ if (!clk_core_check_boundaries(clk->core, min, max)) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (rate < min)
- rate = min;
- else
- rate = max;
+ rate = clk->core->req_rate;
+ if (clk->core->flags & CLK_GET_RATE_NOCACHE)
+ rate = clk_core_get_rate_recalc(clk->core);
- ret = clk_core_set_rate_nolock(clk->core, rate);
- if (ret) {
- /* rollback the changes */
- clk->min_rate = old_min;
- clk->max_rate = old_max;
- }
+ /*
+ * Since the boundaries have been changed, let's give the
+ * opportunity to the provider to adjust the clock rate based on
+ * the new boundaries.
+ *
+ * We also need to handle the case where the clock is currently
+ * outside of the boundaries. Clamping the last requested rate
+ * to the current minimum and maximum will also handle this.
+ *
+ * FIXME:
+ * There is a catch. It may fail for the usual reason (clock
+ * broken, clock protected, etc) but also because:
+ * - round_rate() was not favorable and fell on the wrong
+ * side of the boundary
+ * - the determine_rate() callback does not really check for
+ * this corner case when determining the rate
+ */
+ rate = clamp(rate, min, max);
+ ret = clk_core_set_rate_nolock(clk->core, rate);
+ if (ret) {
+ /* rollback the changes */
+ clk->min_rate = old_min;
+ clk->max_rate = old_max;
}
+out:
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
+ return ret;
+}
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Return: 0 for success or negative errno on failure.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+ int ret;
+
+ if (!clk)
+ return 0;
+
+ clk_prepare_lock();
+
+ ret = clk_set_rate_range_nolock(clk, min, max);
+
clk_prepare_unlock();
return ret;
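Consumer-side sketch for clk_set_rate_range(): constrain a clock to a window and let the core re-evaluate the current rate against the new boundaries, as described in the FIXME above.

	/* Keep the bus clock between 100 and 200 MHz (illustrative limits) */
	static int example_limit_bus_clk(struct clk *clk)
	{
		return clk_set_rate_range(clk, 100000000, 200000000);
	}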
@@ -2197,6 +2760,8 @@ int clk_set_min_rate(struct clk *clk, unsigned long rate)
if (!clk)
return 0;
+ trace_clk_set_min_rate(clk->core, rate);
+
return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);
@@ -2213,6 +2778,8 @@ int clk_set_max_rate(struct clk *clk, unsigned long rate)
if (!clk)
return 0;
+ trace_clk_set_max_rate(clk->core, rate);
+
return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
@@ -2254,7 +2821,7 @@ static void clk_core_reparent(struct clk_core *core,
{
clk_reparent(core, new_parent);
__clk_recalc_accuracies(core);
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
}
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
@@ -2275,23 +2842,13 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
- struct clk_core *core, *parent_core;
-
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
return true;
- core = clk->core;
- parent_core = parent->core;
-
- /* Optimize for the case where the parent is already the parent. */
- if (core->parent == parent_core)
- return true;
-
- return match_string(core->parent_names, core->num_parents,
- parent_core->name) >= 0;
+ return clk_core_has_parent(clk->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2310,7 +2867,7 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
if (core->parent == parent)
return 0;
- /* verify ops for for multi-parent clks */
+ /* verify ops for multi-parent clks */
if (core->num_parents > 1 && !core->ops->set_parent)
return -EPERM;
@@ -2348,9 +2905,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
/* propagate rate and accuracy recalculation accordingly */
if (ret) {
- __clk_recalc_rates(core, ABORT_RATE_CHANGE);
+ __clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
} else {
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
__clk_recalc_accuracies(core);
}
@@ -2360,6 +2917,12 @@ runtime_put:
return ret;
}
+int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
+{
+ return clk_core_set_parent_nolock(hw->core, parent->core);
+}
+EXPORT_SYMBOL_GPL(clk_hw_set_parent);
+
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
@@ -2478,12 +3041,14 @@ static int clk_core_get_phase(struct clk_core *core)
{
int ret;
- clk_prepare_lock();
+ lockdep_assert_held(&prepare_lock);
+ if (!core->ops->get_phase)
+ return 0;
+
/* Always try to update cached phase if possible */
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- ret = core->phase;
- clk_prepare_unlock();
+ ret = core->ops->get_phase(core->hw);
+ if (ret >= 0)
+ core->phase = ret;
return ret;
}
@@ -2497,10 +3062,16 @@ static int clk_core_get_phase(struct clk_core *core)
*/
int clk_get_phase(struct clk *clk)
{
+ int ret;
+
if (!clk)
return 0;
- return clk_core_get_phase(clk->core);
+ clk_prepare_lock();
+ ret = clk_core_get_phase(clk->core);
+ clk_prepare_unlock();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);
@@ -2706,12 +3277,6 @@ static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
-static struct hlist_head *all_lists[] = {
- &clk_root_list,
- &clk_orphan_list,
- NULL,
-};
-
static struct hlist_head *orphan_list[] = {
&clk_orphan_list,
NULL,
@@ -2720,16 +3285,42 @@ static struct hlist_head *orphan_list[] = {
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
- if (!c)
- return;
+ int phase;
+ struct clk *clk_user;
+ int multi_node = 0;
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
+ seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
level * 3 + 1, "",
- 30 - level * 3, c->name,
+ 35 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
- clk_core_get_rate(c), clk_core_get_accuracy(c),
- clk_core_get_phase(c),
- clk_core_get_scaled_duty_cycle(c, 100000));
+ clk_core_get_rate_recalc(c),
+ clk_core_get_accuracy_recalc(c));
+
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "%-5d", phase);
+ else
+ seq_puts(s, "-----");
+
+ seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
+
+ if (c->ops->is_enabled)
+ seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
+ else if (!c->ops->enable)
+ seq_printf(s, " %5c ", 'Y');
+ else
+ seq_printf(s, " %5c ", '?');
+
+ hlist_for_each_entry(clk_user, &c->clks, clks_node) {
+ seq_printf(s, "%*s%-*s %-25s\n",
+ level * 3 + 2 + 105 * multi_node, "",
+ 30,
+ clk_user->dev_id ? clk_user->dev_id : "deviceless",
+ clk_user->con_id ? clk_user->con_id : "no_connection_id");
+
+ multi_node = 1;
+ }
+
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2737,9 +3328,6 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- if (!c)
- return;
-
clk_summary_show_one(s, c, level);
hlist_for_each_entry(child, &c->children, child_node)
@@ -2749,11 +3337,16 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
+ int ret;
- seq_puts(s, " enable prepare protect duty\n");
- seq_puts(s, " clock count count count rate accuracy phase cycle\n");
- seq_puts(s, "---------------------------------------------------------------------------------------------\n");
+ seq_puts(s, " enable prepare protect duty hardware connection\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -2762,6 +3355,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -2769,17 +3363,23 @@ DEFINE_SHOW_ATTRIBUTE(clk_summary);
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
- if (!c)
- return;
+ int phase;
+ unsigned long min_rate, max_rate;
+
+ clk_core_get_boundaries(c, &min_rate, &max_rate);
/* This should be JSON format, i.e. elements separated with a comma */
seq_printf(s, "\"%s\": { ", c->name);
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
- seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
- seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
- seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+ seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
+ seq_printf(s, "\"min_rate\": %lu,", min_rate);
+ seq_printf(s, "\"max_rate\": %lu,", max_rate);
+ seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "\"phase\": %d,", phase);
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
@@ -2788,9 +3388,6 @@ static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
struct clk_core *child;
- if (!c)
- return;
-
clk_dump_one(s, c, level);
hlist_for_each_entry(child, &c->children, child_node) {
@@ -2805,9 +3402,15 @@ static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -2820,12 +3423,104 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_dump);
+#undef CLOCK_ALLOW_WRITE_DEBUGFS
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+/*
+ * This can be dangerous, therefore don't provide any real compile time
+ * configuration option for this feature.
+ * People who want to use this will need to modify the source code directly.
+ */
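Concretely, "modify the source" means flipping the guard above in a local tree; there is intentionally no Kconfig option for it:

	/* Local debugging builds only */
	#define CLOCK_ALLOW_WRITE_DEBUGFS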
+static int clk_rate_set(void *data, u64 val)
+{
+ struct clk_core *core = data;
+ int ret;
+
+ clk_prepare_lock();
+ ret = clk_core_set_rate_nolock(core, val);
+ clk_prepare_unlock();
+
+ return ret;
+}
+
+#define clk_rate_mode 0644
+
+static int clk_phase_set(void *data, u64 val)
+{
+ struct clk_core *core = data;
+ int degrees = do_div(val, 360);
+ int ret;
+
+ clk_prepare_lock();
+ ret = clk_core_set_phase_nolock(core, degrees);
+ clk_prepare_unlock();
+
+ return ret;
+}
+
+#define clk_phase_mode 0644
+
+static int clk_prepare_enable_set(void *data, u64 val)
+{
+ struct clk_core *core = data;
+ int ret = 0;
+
+ if (val)
+ ret = clk_prepare_enable(core->hw->clk);
+ else
+ clk_disable_unprepare(core->hw->clk);
+
+ return ret;
+}
+
+static int clk_prepare_enable_get(void *data, u64 *val)
+{
+ struct clk_core *core = data;
+
+ *val = core->enable_count && core->prepare_count;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
+ clk_prepare_enable_set, "%llu\n");
+
+#else
+#define clk_rate_set NULL
+#define clk_rate_mode 0444
+
+#define clk_phase_set NULL
+#define clk_phase_mode 0444
+#endif
+
+static int clk_rate_get(void *data, u64 *val)
+{
+ struct clk_core *core = data;
+
+ clk_prepare_lock();
+ *val = clk_core_get_rate_recalc(core);
+ clk_prepare_unlock();
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
+
+static int clk_phase_get(void *data, u64 *val)
+{
+ struct clk_core *core = data;
+
+ *val = core->phase;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_phase_fops, clk_phase_get, clk_phase_set, "%llu\n");
+
static const struct {
unsigned long flag;
const char *name;
@@ -2835,7 +3530,6 @@ static const struct {
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
ENTRY(CLK_IGNORE_UNUSED),
- ENTRY(CLK_IS_BASIC),
ENTRY(CLK_GET_RATE_NOCACHE),
ENTRY(CLK_SET_RATE_NO_REPARENT),
ENTRY(CLK_GET_ACCURACY_NOCACHE),
@@ -2868,20 +3562,104 @@ static int clk_flags_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_flags);
+static void possible_parent_show(struct seq_file *s, struct clk_core *core,
+ unsigned int i, char terminator)
+{
+ struct clk_core *parent;
+ const char *name = NULL;
+
+ /*
+ * Go through the following options to fetch a parent's name.
+ *
+ * 1. Fetch the registered parent clock and use its name
+ * 2. Use the global (fallback) name if specified
+ * 3. Use the local fw_name if provided
+ * 4. Fetch parent clock's clock-output-name if DT index was set
+ *
+ * This may still fail in some cases, such as when the parent is
+ * specified directly via a struct clk_hw pointer, but it isn't
+ * registered (yet).
+ */
+ parent = clk_core_get_parent_by_index(core, i);
+ if (parent) {
+ seq_puts(s, parent->name);
+ } else if (core->parents[i].name) {
+ seq_puts(s, core->parents[i].name);
+ } else if (core->parents[i].fw_name) {
+ seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
+ } else {
+ if (core->parents[i].index >= 0)
+ name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
+ if (!name)
+ name = "(missing)";
+
+ seq_puts(s, name);
+ }
+
+ seq_putc(s, terminator);
+}
+
static int possible_parents_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
int i;
for (i = 0; i < core->num_parents - 1; i++)
- seq_printf(s, "%s ", core->parent_names[i]);
+ possible_parent_show(s, core, i, ' ');
- seq_printf(s, "%s\n", core->parent_names[i]);
+ possible_parent_show(s, core, i, '\n');
return 0;
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);
+static int current_parent_show(struct seq_file *s, void *data)
+{
+ struct clk_core *core = s->private;
+
+ if (core->parent)
+ seq_printf(s, "%s\n", core->parent->name);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(current_parent);
+
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct clk_core *core = s->private;
+ struct clk_core *parent;
+ u8 idx;
+ int err;
+
+ err = kstrtou8_from_user(ubuf, count, 0, &idx);
+ if (err < 0)
+ return err;
+
+ parent = clk_core_get_parent_by_index(core, idx);
+ if (!parent)
+ return -ENOENT;
+
+ clk_prepare_lock();
+ err = clk_core_set_parent_nolock(core, parent);
+ clk_prepare_unlock();
+ if (err)
+ return err;
+
+ return count;
+}
+
+static const struct file_operations current_parent_rw_fops = {
+ .open = current_parent_open,
+ .write = current_parent_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
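With CLOCK_ALLOW_WRITE_DEBUGFS defined, clk_parent becomes writable: writing a
possible-parent index (for example, echo 1 > /sys/kernel/debug/clk/<name>/clk_parent)
resolves it through clk_core_get_parent_by_index() and reparents the clk via
clk_core_set_parent_nolock() under the prepare lock.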
+
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
@@ -2893,6 +3671,34 @@ static int clk_duty_cycle_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
+static int clk_min_rate_show(struct seq_file *s, void *data)
+{
+ struct clk_core *core = s->private;
+ unsigned long min_rate, max_rate;
+
+ clk_prepare_lock();
+ clk_core_get_boundaries(core, &min_rate, &max_rate);
+ clk_prepare_unlock();
+ seq_printf(s, "%lu\n", min_rate);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
+
+static int clk_max_rate_show(struct seq_file *s, void *data)
+{
+ struct clk_core *core = s->private;
+ unsigned long min_rate, max_rate;
+
+ clk_prepare_lock();
+ clk_core_get_boundaries(core, &min_rate, &max_rate);
+ clk_prepare_unlock();
+ seq_printf(s, "%lu\n", max_rate);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
+
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *root;
@@ -2903,9 +3709,13 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
root = debugfs_create_dir(core->name, pdentry);
core->dentry = root;
- debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
+ debugfs_create_file("clk_rate", clk_rate_mode, root, core,
+ &clk_rate_fops);
+ debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
+ debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
- debugfs_create_u32("clk_phase", 0444, root, &core->phase);
+ debugfs_create_file("clk_phase", clk_phase_mode, root, core,
+ &clk_phase_fops);
debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
@@ -2913,6 +3723,18 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
debugfs_create_file("clk_duty_cycle", 0444, root, core,
&clk_duty_cycle_fops);
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+ debugfs_create_file("clk_prepare_enable", 0644, root, core,
+ &clk_prepare_enable_fops);
+
+ if (core->num_parents > 1)
+ debugfs_create_file("clk_parent", 0644, root, core,
+ &current_parent_rw_fops);
+ else
+#endif
+ if (core->num_parents > 0)
+ debugfs_create_file("clk_parent", 0444, root, core,
+ &current_parent_fops);
if (core->num_parents > 1)
debugfs_create_file("clk_possible_parents", 0444, root, core,
@@ -2969,6 +3791,24 @@ static int __init clk_debug_init(void)
{
struct clk_core *core;
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+ pr_warn("\n");
+ pr_warn("********************************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
+ pr_warn("** **\n");
+ pr_warn("** This means that this kernel is built to expose clk operations **\n");
+ pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
+ pr_warn("** to userspace, which may compromise security on your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging the **\n");
+ pr_warn("** kernel, report this immediately to your vendor! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("********************************************************************\n");
+#endif
+
rootdir = debugfs_create_dir("clk", NULL);
debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
@@ -2992,15 +3832,52 @@ static int __init clk_debug_init(void)
late_initcall(clk_debug_init);
#else
static inline void clk_debug_register(struct clk_core *core) { }
-static inline void clk_debug_reparent(struct clk_core *core,
- struct clk_core *new_parent)
-{
-}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif
+static void clk_core_reparent_orphans_nolock(void)
+{
+ struct clk_core *orphan;
+ struct hlist_node *tmp2;
+
+ /*
+ * walk the list of orphan clocks and reparent any that have newly found
+ * a parent.
+ */
+ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
+ struct clk_core *parent = __clk_init_parent(orphan);
+
+ /*
+ * We need to use __clk_set_parent_before() and _after() to
+ * properly migrate any prepare/enable count of the orphan
+ * clock. This is important for CLK_IS_CRITICAL clocks, which
+ * are enabled during init but might not have a parent yet.
+ */
+ if (parent) {
+ /* update the clk tree topology */
+ __clk_set_parent_before(orphan, parent);
+ __clk_set_parent_after(orphan, parent, NULL);
+ __clk_recalc_accuracies(orphan);
+ __clk_recalc_rates(orphan, true, 0);
+
+ /*
+ * __clk_init_parent() will set the initial req_rate to
+ * 0 if the clock doesn't have clk_ops::recalc_rate and
+ * is an orphan when it's registered.
+ *
+ * 'req_rate' is used by clk_set_rate_range() and
+ * clk_put() to trigger a clk_set_rate() call whenever
+ * the boundaries are modified. Let's make sure
+ * 'req_rate' is set to something non-zero so that
+ * clk_set_rate_range() doesn't drop the frequency.
+ */
+ orphan->req_rate = orphan->rate;
+ }
+ }
+}
+
/**
* __clk_core_init - initialize the data structures in a struct clk_core
* @core: clk_core being initialized
@@ -3010,16 +3887,21 @@ static inline void clk_debug_unregister(struct clk_core *core)
*/
static int __clk_core_init(struct clk_core *core)
{
- int i, ret;
- struct clk_core *orphan;
- struct hlist_node *tmp2;
+ int ret;
+ struct clk_core *parent;
unsigned long rate;
-
- if (!core)
- return -EINVAL;
+ int phase;
clk_prepare_lock();
+ /*
+ * Set hw->core after grabbing the prepare_lock to synchronize with
+ * callers of clk_core_fill_parent_index() where we treat hw->core
+ * being NULL as the clk not being registered yet. This is crucial so
+ * that clks aren't parented until their parent is fully registered.
+ */
+ core->hw->core = core;
+
ret = clk_pm_runtime_get(core);
if (ret)
goto unlock;
@@ -3049,6 +3931,13 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
+ if (core->ops->set_parent && !core->ops->determine_rate) {
+ pr_err("%s: %s must implement .set_parent & .determine_rate\n",
+ __func__, core->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (core->num_parents > 1 && !core->ops->get_parent) {
pr_err("%s: %s must implement .get_parent as it has multi parents\n",
__func__, core->name);
@@ -3064,13 +3953,27 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
- /* throw a WARN if any entries in parent_names are NULL */
- for (i = 0; i < core->num_parents; i++)
- WARN(!core->parent_names[i],
- "%s: invalid NULL in %s's .parent_names\n",
- __func__, core->name);
+ /*
+ * optional platform-specific magic
+ *
+ * The .init callback is not used by any of the basic clock types, but
+ * exists for weird hardware that must perform initialization magic for
+ * CCF to get an accurate view of the clock for any other callbacks. It
+ * may also be used by clocks that need to perform dynamic allocations;
+ * such allocations must be freed in the terminate() callback.
+ * This callback shall not be used to initialize parameter state, such
+ * as rate, parent, etc ...
+ *
+ * If it exists, this callback should be called before any other
+ * callback of the clock.
+ */
+ if (core->ops->init) {
+ ret = core->ops->init(core->hw);
+ if (ret)
+ goto out;
+ }
- core->parent = __clk_init_parent(core);
+ parent = core->parent = __clk_init_parent(core);
/*
* Populate core->parent if parent has already been clk_core_init'd. If
@@ -3082,10 +3985,9 @@ static int __clk_core_init(struct clk_core *core)
* clocks and re-parent any that are children of the clock currently
* being clk_init'd.
*/
- if (core->parent) {
- hlist_add_head(&core->child_node,
- &core->parent->children);
- core->orphan = core->parent->orphan;
+ if (parent) {
+ hlist_add_head(&core->child_node, &parent->children);
+ core->orphan = parent->orphan;
} else if (!core->num_parents) {
hlist_add_head(&core->child_node, &clk_root_list);
core->orphan = false;
@@ -3093,17 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
-
- /*
- * optional platform-specific magic
- *
- * The .init callback is not used by any of the basic clock types, but
- * exists for weird hardware that must perform initialization magic.
- * Please consider other ways of solving initialization problems before
- * using this callback, as its use is discouraged.
- */
- if (core->ops->init)
- core->ops->init(core->hw);
+ hash_add(clk_hashtable, &core->hashtable_node,
+ full_name_hash(NULL, core->name, strlen(core->name)));
/*
* Set clk's accuracy. The preferred method is to use
@@ -3114,21 +4007,24 @@ static int __clk_core_init(struct clk_core *core)
*/
if (core->ops->recalc_accuracy)
core->accuracy = core->ops->recalc_accuracy(core->hw,
- __clk_get_accuracy(core->parent));
- else if (core->parent)
- core->accuracy = core->parent->accuracy;
+ clk_core_get_accuracy_no_lock(parent));
+ else if (parent)
+ core->accuracy = parent->accuracy;
else
core->accuracy = 0;
/*
- * Set clk's phase.
+ * Set clk's phase, letting clk_core_get_phase() cache the result.
* Since a phase is by definition relative to its parent, just
* query the current clock phase, or just assume it's in phase.
*/
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- else
- core->phase = 0;
+ phase = clk_core_get_phase(core);
+ if (phase < 0) {
+ ret = phase;
+ pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
+ core->name);
+ goto out;
+ }
/*
* Set clk's duty cycle.
@@ -3143,9 +4039,9 @@ static int __clk_core_init(struct clk_core *core)
*/
if (core->ops->recalc_rate)
rate = core->ops->recalc_rate(core->hw,
- clk_core_get_rate_nolock(core->parent));
- else if (core->parent)
- rate = core->parent->rate;
+ clk_core_get_rate_nolock(parent));
+ else if (parent)
+ rate = parent->rate;
else
rate = 0;
core->rate = core->req_rate = rate;
@@ -3156,41 +4052,32 @@ static int __clk_core_init(struct clk_core *core)
* reparenting clocks
*/
if (core->flags & CLK_IS_CRITICAL) {
- unsigned long flags;
-
- clk_core_prepare(core);
-
- flags = clk_enable_lock();
- clk_core_enable(core);
- clk_enable_unlock(flags);
- }
-
- /*
- * walk the list of orphan clocks and reparent any that newly finds a
- * parent.
- */
- hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
- struct clk_core *parent = __clk_init_parent(orphan);
+ ret = clk_core_prepare(core);
+ if (ret) {
+ pr_warn("%s: critical clk '%s' failed to prepare\n",
+ __func__, core->name);
+ goto out;
+ }
- /*
- * We need to use __clk_set_parent_before() and _after() to
- * to properly migrate any prepare/enable count of the orphan
- * clock. This is important for CLK_IS_CRITICAL clocks, which
- * are enabled during init but might not have a parent yet.
- */
- if (parent) {
- /* update the clk tree topology */
- __clk_set_parent_before(orphan, parent);
- __clk_set_parent_after(orphan, parent, NULL);
- __clk_recalc_accuracies(orphan);
- __clk_recalc_rates(orphan, 0);
+ ret = clk_core_enable_lock(core);
+ if (ret) {
+ pr_warn("%s: critical clk '%s' failed to enable\n",
+ __func__, core->name);
+ clk_core_unprepare(core);
+ goto out;
}
}
- kref_init(&core->ref);
+ clk_core_reparent_orphans_nolock();
out:
clk_pm_runtime_put(core);
unlock:
+ if (ret) {
+ hash_del(&core->hashtable_node);
+ hlist_del_init(&core->child_node);
+ core->hw->core = NULL;
+ }
+
clk_prepare_unlock();
if (!ret)
@@ -3199,57 +4086,247 @@ unlock:
return ret;
}
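Among the sanity checks in __clk_core_init() above is the requirement that any
clk implementing .set_parent also implement .determine_rate. A sketch of a mux
ops table that satisfies it, with hypothetical callbacks;
__clk_mux_determine_rate() is the generic CCF helper for this case:

	static const struct clk_ops example_mux_ops = {
		.get_parent	= example_mux_get_parent,	/* hypothetical */
		.set_parent	= example_mux_set_parent,	/* hypothetical */
		.determine_rate	= __clk_mux_determine_rate,	/* generic helper */
	};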
-struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+/**
+ * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
+ * @core: clk to add consumer to
+ * @clk: consumer to link to a clk
+ */
+static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
+{
+ clk_prepare_lock();
+ hlist_add_head(&clk->clks_node, &core->clks);
+ clk_prepare_unlock();
+}
+
+/**
+ * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
+ * @clk: consumer to unlink
+ */
+static void clk_core_unlink_consumer(struct clk *clk)
+{
+ lockdep_assert_held(&prepare_lock);
+ hlist_del(&clk->clks_node);
+}
+
+/**
+ * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core
+ * @core: clk to allocate a consumer for
+ * @dev_id: string describing device name
+ * @con_id: connection ID string on device
+ *
+ * Returns: clk consumer left unlinked from the consumer list
+ */
+static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
const char *con_id)
{
struct clk *clk;
- /* This is to allow this function to be chained to others */
- if (IS_ERR_OR_NULL(hw))
- return ERR_CAST(hw);
-
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk)
return ERR_PTR(-ENOMEM);
- clk->core = hw->core;
+ clk->core = core;
clk->dev_id = dev_id;
clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
clk->max_rate = ULONG_MAX;
- clk_prepare_lock();
- hlist_add_head(&clk->clks_node, &hw->core->clks);
- clk_prepare_unlock();
-
return clk;
}
-/* keep in sync with __clk_put */
-void __clk_free_clk(struct clk *clk)
+/**
+ * free_clk - Free a clk consumer
+ * @clk: clk consumer to free
+ *
+ * Note, this assumes the clk has been unlinked from the clk_core consumer
+ * list.
+ */
+static void free_clk(struct clk *clk)
{
- clk_prepare_lock();
- hlist_del(&clk->clks_node);
- clk_prepare_unlock();
-
kfree_const(clk->con_id);
kfree(clk);
}
/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
+ * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
+ * a clk_hw
+ * @dev: clk consumer device
+ * @hw: clk_hw associated with the clk being consumed
+ * @dev_id: string describing device name
+ * @con_id: connection ID string on device
*
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
+ * This is the main function used to create a clk pointer for use by clk
+ * consumers. It connects a consumer to the clk_core and clk_hw structures
+ * used by the framework and clk provider respectively.
*/
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
+struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
+ const char *dev_id, const char *con_id)
+{
+ struct clk *clk;
+ struct clk_core *core;
+
+ /* This is to allow this function to be chained to others */
+ if (IS_ERR_OR_NULL(hw))
+ return ERR_CAST(hw);
+
+ core = hw->core;
+ clk = alloc_clk(core, dev_id, con_id);
+ if (IS_ERR(clk))
+ return clk;
+ clk->dev = dev;
+
+ if (!try_module_get(core->owner)) {
+ free_clk(clk);
+ return ERR_PTR(-ENOENT);
+ }
+
+ kref_get(&core->ref);
+ clk_core_link_consumer(core, clk);
+
+ return clk;
+}
+
+/**
+ * clk_hw_get_clk - get clk consumer given a clk_hw
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Returns: new clk consumer
+ * This is the function to be used by providers that need to get a consumer
+ * clk and act on the clock element.
+ * Calls to this function must be balanced with calls to clk_put().
+ */
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
+{
+ struct device *dev = hw->core->dev;
+ const char *name = dev ? dev_name(dev) : NULL;
+
+ return clk_hw_create_clk(dev, hw, name, con_id);
+}
+EXPORT_SYMBOL(clk_hw_get_clk);
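A sketch of the intended use, with hypothetical names: a provider that owns a
clk_hw takes a proper consumer handle before acting on its own clock, and
balances it with clk_put():

	static int example_set_default_rate(struct clk_hw *hw)
	{
		struct clk *clk;
		int ret;

		clk = clk_hw_get_clk(hw, "default-rate");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_set_rate(clk, 19200000);	/* assumed default */
		clk_put(clk);	/* balances clk_hw_get_clk() */

		return ret;
	}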
+
+static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
+{
+ const char *dst;
+
+ if (!src) {
+ if (must_exist)
+ return -EINVAL;
+ return 0;
+ }
+
+ *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int clk_core_populate_parent_map(struct clk_core *core,
+ const struct clk_init_data *init)
+{
+ u8 num_parents = init->num_parents;
+ const char * const *parent_names = init->parent_names;
+ const struct clk_hw **parent_hws = init->parent_hws;
+ const struct clk_parent_data *parent_data = init->parent_data;
+ int i, ret = 0;
+ struct clk_parent_map *parents, *parent;
+
+ if (!num_parents)
+ return 0;
+
+ /*
+ * Avoid unnecessary string look-ups of clk_core's possible parents by
+ * having a cache of names/clk_hw pointers to clk_core pointers.
+ */
+ parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
+ core->parents = parents;
+ if (!parents)
+ return -ENOMEM;
+
+ /* Copy everything over because it might be __initdata */
+ for (i = 0, parent = parents; i < num_parents; i++, parent++) {
+ parent->index = -1;
+ if (parent_names) {
+ /* throw a WARN if any entries are NULL */
+ WARN(!parent_names[i],
+ "%s: invalid NULL in %s's .parent_names\n",
+ __func__, core->name);
+ ret = clk_cpy_name(&parent->name, parent_names[i],
+ true);
+ } else if (parent_data) {
+ parent->hw = parent_data[i].hw;
+ parent->index = parent_data[i].index;
+ ret = clk_cpy_name(&parent->fw_name,
+ parent_data[i].fw_name, false);
+ if (!ret)
+ ret = clk_cpy_name(&parent->name,
+ parent_data[i].name,
+ false);
+ } else if (parent_hws) {
+ parent->hw = parent_hws[i];
+ } else {
+ ret = -EINVAL;
+ WARN(1, "Must specify parents if num_parents > 0\n");
+ }
+
+ if (ret) {
+ do {
+ kfree_const(parents[i].name);
+ kfree_const(parents[i].fw_name);
+ } while (--i >= 0);
+ kfree(parents);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
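clk_core_populate_parent_map() accepts exactly one of three parent
descriptions from clk_init_data: legacy parent_names strings, parent_data
entries, or direct parent_hws pointers. A sketch of the preferred
clk_parent_data form, with hypothetical names (.index refers to an entry in
the consumer's DT "clocks" property):

	static const struct clk_parent_data example_mux_parents[] = {
		{ .fw_name = "xo", .name = "xo_board" }, /* DT name + global fallback */
		{ .index = 1 },			/* second entry in DT "clocks" */
		{ .hw = &example_pll.hw },	/* clk_hw embedded in a driver struct */
	};

	static const struct clk_init_data example_mux_init = {
		.name		= "example_mux",
		.ops		= &example_mux_ops,	/* hypothetical */
		.parent_data	= example_mux_parents,
		.num_parents	= ARRAY_SIZE(example_mux_parents),
	};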
+static void clk_core_free_parent_map(struct clk_core *core)
+{
+ int i = core->num_parents;
+
+ if (!core->num_parents)
+ return;
+
+ while (--i >= 0) {
+ kfree_const(core->parents[i].name);
+ kfree_const(core->parents[i].fw_name);
+ }
+
+ kfree(core->parents);
+}
+
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
+static struct clk *
+__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
- int i, ret;
+ int ret;
struct clk_core *core;
+ const struct clk_init_data *init = hw->init;
+
+ /*
+ * The init data is not supposed to be used outside of the registration
+ * path. Set it to NULL so that provider drivers can't use it either and
+ * so that we catch use of hw->init early on in the core.
+ */
+ hw->init = NULL;
core = kzalloc(sizeof(*core), GFP_KERNEL);
if (!core) {
@@ -3257,86 +4334,115 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
goto fail_out;
}
- core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
+ kref_init(&core->ref);
+
+ core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
goto fail_name;
}
- if (WARN_ON(!hw->init->ops)) {
+ if (WARN_ON(!init->ops)) {
ret = -EINVAL;
goto fail_ops;
}
- core->ops = hw->init->ops;
+ core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->dev = dev;
+ core->dev = dev;
+ clk_pm_runtime_init(core);
+ core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
- core->flags = hw->init->flags;
- core->num_parents = hw->init->num_parents;
+ core->flags = init->flags;
+ core->num_parents = init->num_parents;
core->min_rate = 0;
core->max_rate = ULONG_MAX;
- hw->core = core;
-
- /* allocate local copy in case parent_names is __initdata */
- core->parent_names = kcalloc(core->num_parents, sizeof(char *),
- GFP_KERNEL);
- if (!core->parent_names) {
- ret = -ENOMEM;
- goto fail_parent_names;
- }
-
-
- /* copy each string name in case parent_names is __initdata */
- for (i = 0; i < core->num_parents; i++) {
- core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
- GFP_KERNEL);
- if (!core->parent_names[i]) {
- ret = -ENOMEM;
- goto fail_parent_names_copy;
- }
- }
-
- /* avoid unnecessary string look-ups of clk_core's possible parents. */
- core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
- GFP_KERNEL);
- if (!core->parents) {
- ret = -ENOMEM;
+ ret = clk_core_populate_parent_map(core, init);
+ if (ret)
goto fail_parents;
- };
INIT_HLIST_HEAD(&core->clks);
- hw->clk = __clk_create_clk(hw, NULL, NULL);
+ /*
+ * Don't call clk_hw_create_clk() here because that would pin the
+ * provider module to itself and prevent it from ever being removed.
+ */
+ hw->clk = alloc_clk(core, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
- goto fail_parents;
+ goto fail_create_clk;
}
+ clk_core_link_consumer(core, hw->clk);
+
ret = __clk_core_init(core);
if (!ret)
return hw->clk;
- __clk_free_clk(hw->clk);
+ clk_prepare_lock();
+ clk_core_unlink_consumer(hw->clk);
+ clk_prepare_unlock();
+
+ free_clk(hw->clk);
hw->clk = NULL;
+fail_create_clk:
fail_parents:
- kfree(core->parents);
-fail_parent_names_copy:
- while (--i >= 0)
- kfree_const(core->parent_names[i]);
- kfree(core->parent_names);
-fail_parent_names:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
+ if (dev) {
+ dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n",
+ init->name, hw);
+ } else {
+ pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n",
+ np, ERR_PTR(ret), init->name, hw);
+ }
return ERR_PTR(ret);
}
+
+/**
+ * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
+ * @dev: Device to get device node of
+ *
+ * Return: device node pointer of @dev, or the device node pointer of
+ * @dev->parent if @dev doesn't have a device node, or NULL if neither
+ * @dev nor @dev->parent has a device node.
+ */
+static struct device_node *dev_or_parent_of_node(struct device *dev)
+{
+ struct device_node *np;
+
+ if (!dev)
+ return NULL;
+
+ np = dev_of_node(dev);
+ if (!np)
+ np = dev_of_node(dev->parent);
+
+ return np;
+}
+
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the *deprecated* interface for populating the clock tree with
+ * new clock nodes. Use clk_hw_register() instead.
+ *
+ * Returns: a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
+{
+ return __clk_register(dev, dev_or_parent_of_node(dev), hw);
+}
EXPORT_SYMBOL_GPL(clk_register);
/**
@@ -3351,26 +4457,27 @@ EXPORT_SYMBOL_GPL(clk_register);
*/
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
- return PTR_ERR_OR_ZERO(clk_register(dev, hw));
+ return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
+ hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
+/*
+ * of_clk_hw_register - register a clk_hw and return an error code
+ * @node: device_node of device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * of_clk_hw_register() is the primary interface for populating the clock tree
+ * with new clock nodes when a struct device is not available, but a struct
+ * device_node is. It returns zero on success or a negative error code on
+ * failure. Drivers must test for an error code after calling
+ * of_clk_hw_register().
+ */
+int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
- int i = core->num_parents;
-
- lockdep_assert_held(&prepare_lock);
-
- kfree(core->parents);
- while (--i >= 0)
- kfree_const(core->parent_names[i]);
-
- kfree(core->parent_names);
- kfree_const(core->name);
- kfree(core);
+ return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
}
+EXPORT_SYMBOL_GPL(of_clk_hw_register);
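A sketch of of_clk_hw_register() in an early, device-less provider, with
hypothetical names; of_clk_hw_simple_get() and CLK_OF_DECLARE() are existing
CCF helpers:

	static struct clk_hw example_osc_hw;

	static void __init example_osc_setup(struct device_node *np)
	{
		static const struct clk_init_data init = {
			.name = "example-osc",
			.ops = &example_osc_ops,	/* hypothetical */
		};

		example_osc_hw.init = &init;
		if (!of_clk_hw_register(np, &example_osc_hw))
			of_clk_add_hw_provider(np, of_clk_hw_simple_get,
					       &example_osc_hw);
	}
	CLK_OF_DECLARE(example_osc, "example,osc", example_osc_setup);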
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
@@ -3398,15 +4505,50 @@ static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
return -ENXIO;
}
+static int clk_nodrv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return -ENXIO;
+}
+
static const struct clk_ops clk_nodrv_ops = {
.enable = clk_nodrv_prepare_enable,
.disable = clk_nodrv_disable_unprepare,
.prepare = clk_nodrv_prepare_enable,
.unprepare = clk_nodrv_disable_unprepare,
+ .determine_rate = clk_nodrv_determine_rate,
.set_rate = clk_nodrv_set_rate,
.set_parent = clk_nodrv_set_parent,
};
+static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
+ const struct clk_core *target)
+{
+ int i;
+ struct clk_core *child;
+
+ for (i = 0; i < root->num_parents; i++)
+ if (root->parents[i].core == target)
+ root->parents[i].core = NULL;
+
+ hlist_for_each_entry(child, &root->children, child_node)
+ clk_core_evict_parent_cache_subtree(child, target);
+}
+
+/* Remove this clk from all parent caches */
+static void clk_core_evict_parent_cache(struct clk_core *core)
+{
+ const struct hlist_head **lists;
+ struct clk_core *root;
+
+ lockdep_assert_held(&prepare_lock);
+
+ for (lists = all_lists; *lists; lists++)
+ hlist_for_each_entry(root, *lists, child_node)
+ clk_core_evict_parent_cache_subtree(root, core);
+}
+
/**
* clk_unregister - unregister a currently registered clock
* @clk: clock to unregister
@@ -3414,6 +4556,7 @@ static const struct clk_ops clk_nodrv_ops = {
void clk_unregister(struct clk *clk)
{
unsigned long flags;
+ const struct clk_ops *ops;
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
@@ -3422,10 +4565,12 @@ void clk_unregister(struct clk *clk)
clk_prepare_lock();
- if (clk->core->ops == &clk_nodrv_ops) {
+ ops = clk->core->ops;
+ if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- goto unlock;
+ clk_prepare_unlock();
+ return;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -3435,6 +4580,9 @@ void clk_unregister(struct clk *clk)
clk->core->ops = &clk_nodrv_ops;
clk_enable_unlock(flags);
+ if (ops->terminate)
+ ops->terminate(clk->core->hw);
+
if (!hlist_empty(&clk->core->children)) {
struct clk_core *child;
struct hlist_node *t;
@@ -3445,6 +4593,9 @@ void clk_unregister(struct clk *clk)
clk_core_set_parent_nolock(child, NULL);
}
+ clk_core_evict_parent_cache(clk->core);
+
+ hash_del(&clk->core->hashtable_node);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
@@ -3454,10 +4605,10 @@ void clk_unregister(struct clk *clk)
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
+ clk_prepare_unlock();
kref_put(&clk->core->ref, __clk_release);
-unlock:
- clk_prepare_unlock();
+ free_clk(clk);
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -3471,12 +4622,12 @@ void clk_hw_unregister(struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);
-static void devm_clk_release(struct device *dev, void *res)
+static void devm_clk_unregister_cb(struct device *dev, void *res)
{
clk_unregister(*(struct clk **)res);
}
-static void devm_clk_hw_release(struct device *dev, void *res)
+static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
{
clk_hw_unregister(*(struct clk_hw **)res);
}
@@ -3486,16 +4637,17 @@ static void devm_clk_hw_release(struct device *dev, void *res)
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
- * Managed clk_register(). Clocks returned from this function are
- * automatically clk_unregister()ed on driver detach. See clk_register() for
- * more information.
+ * Managed clk_register(). This function is *deprecated*; use
+ * devm_clk_hw_register() instead.
+ *
+ * Clocks returned from this function are automatically clk_unregister()ed on
+ * driver detach. See clk_register() for more information.
*/
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
struct clk *clk;
struct clk **clkp;
- clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
if (!clkp)
return ERR_PTR(-ENOMEM);
@@ -3525,7 +4677,7 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
struct clk_hw **hwp;
int ret;
- hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
+ hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
if (!hwp)
return -ENOMEM;
@@ -3541,70 +4693,53 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
-static int devm_clk_match(struct device *dev, void *res, void *data)
-{
- struct clk *c = res;
- if (WARN_ON(!c))
- return 0;
- return c == data;
-}
-
-static int devm_clk_hw_match(struct device *dev, void *res, void *data)
+static void devm_clk_release(struct device *dev, void *res)
{
- struct clk_hw *hw = res;
-
- if (WARN_ON(!hw))
- return 0;
- return hw == data;
+ clk_put(*(struct clk **)res);
}
/**
- * devm_clk_unregister - resource managed clk_unregister()
- * @clk: clock to unregister
+ * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
+ * @dev: device the returned clk is bound to; expected to be the clk provider's device
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
*
- * Deallocate a clock allocated with devm_clk_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
+ * Managed clk_hw_get_clk(). Clocks obtained with this function are
+ * automatically clk_put() on driver detach. See clk_put() for more
+ * information.
*/
-void devm_clk_unregister(struct device *dev, struct clk *clk)
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
+ const char *con_id)
{
- WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
-}
-EXPORT_SYMBOL_GPL(devm_clk_unregister);
+ struct clk *clk;
+ struct clk **clkp;
-/**
- * devm_clk_hw_unregister - resource managed clk_hw_unregister()
- * @dev: device that is unregistering the hardware-specific clock data
- * @hw: link to hardware-specific clock data
- *
- * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
-{
- WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
- hw));
+ /*
+ * This should not happen because it would mean we have drivers
+ * passing around clk_hw pointers instead of having the caller use
+ * proper clk_get() style APIs.
+ */
+ WARN_ON_ONCE(dev != hw->core->dev);
+
+ clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ if (!clkp)
+ return ERR_PTR(-ENOMEM);
+
+ clk = clk_hw_get_clk(hw, con_id);
+ if (!IS_ERR(clk)) {
+ *clkp = clk;
+ devres_add(dev, clkp);
+ } else {
+ devres_free(clkp);
+ }
+
+ return clk;
}
-EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
+EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
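The managed variant ties the consumer handle to @dev, so no explicit clk_put()
is needed. A short sketch, with hypothetical names:

	/* in probe(); priv->hw was registered against the same dev */
	clk = devm_clk_hw_get_clk(dev, &priv->hw, "self");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	/* clk_put() happens automatically on driver detach */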
/*
* clkdev helpers
*/
-int __clk_get(struct clk *clk)
-{
- struct clk_core *core = !clk ? NULL : clk->core;
-
- if (core) {
- if (!try_module_get(core->owner))
- return 0;
-
- kref_get(&core->ref);
- }
- return 1;
-}
-/* keep in sync with __clk_free_clk */
void __clk_put(struct clk *clk)
{
struct module *owner;
@@ -3626,20 +4761,18 @@ void __clk_put(struct clk *clk)
clk->exclusive_count = 0;
}
- hlist_del(&clk->clks_node);
- if (clk->min_rate > clk->core->req_rate ||
- clk->max_rate < clk->core->req_rate)
- clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+ clk_core_unlink_consumer(clk);
- owner = clk->core->owner;
- kref_put(&clk->core->ref, __clk_release);
+ /* If we had any boundaries on that clock, let's drop them. */
+ if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
+ clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
clk_prepare_unlock();
+ owner = clk->core->owner;
+ kref_put(&clk->core->ref, __clk_release);
module_put(owner);
-
- kfree_const(clk->con_id);
- kfree(clk);
+ free_clk(clk);
}
/*** clk rate change notifiers ***/
@@ -3677,20 +4810,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
- break;
+ goto found;
/* if clk wasn't in the notifier list, allocate new clk_notifier */
- if (cn->clk != clk) {
- cn = kzalloc(sizeof(*cn), GFP_KERNEL);
- if (!cn)
- goto out;
+ cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+ if (!cn)
+ goto out;
- cn->clk = clk;
- srcu_init_notifier_head(&cn->notifier_head);
+ cn->clk = clk;
+ srcu_init_notifier_head(&cn->notifier_head);
- list_add(&cn->node, &clk_notifier_list);
- }
+ list_add(&cn->node, &clk_notifier_list);
+found:
ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
clk->core->notifier_count++;
@@ -3715,32 +4847,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
- struct clk_notifier *cn = NULL;
- int ret = -EINVAL;
+ struct clk_notifier *cn;
+ int ret = -ENOENT;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
- list_for_each_entry(cn, &clk_notifier_list, node)
- if (cn->clk == clk)
- break;
-
- if (cn->clk == clk) {
- ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+ list_for_each_entry(cn, &clk_notifier_list, node) {
+ if (cn->clk == clk) {
+ ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
- clk->core->notifier_count--;
+ clk->core->notifier_count--;
- /* XXX the notifier code should handle this better */
- if (!cn->notifier_head.head) {
- srcu_cleanup_notifier_head(&cn->notifier_head);
- list_del(&cn->node);
- kfree(cn);
+ /* XXX the notifier code should handle this better */
+ if (!cn->notifier_head.head) {
+ srcu_cleanup_notifier_head(&cn->notifier_head);
+ list_del(&cn->node);
+ kfree(cn);
+ }
+ break;
}
-
- } else {
- ret = -ENOENT;
}
clk_prepare_unlock();
@@ -3749,13 +4877,59 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
+struct clk_notifier_devres {
+ struct clk *clk;
+ struct notifier_block *nb;
+};
+
+static void devm_clk_notifier_release(struct device *dev, void *res)
+{
+ struct clk_notifier_devres *devres = res;
+
+ clk_notifier_unregister(devres->clk, devres->nb);
+}
+
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ struct notifier_block *nb)
+{
+ struct clk_notifier_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_notifier_release,
+ sizeof(*devres), GFP_KERNEL);
+
+ if (!devres)
+ return -ENOMEM;
+
+ ret = clk_notifier_register(clk, nb);
+ if (!ret) {
+ devres->clk = clk;
+ devres->nb = nb;
+ devres_add(dev, devres);
+ } else {
+ devres_free(devres);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
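A sketch of a consumer tracking rate changes through the managed notifier,
with hypothetical names; the callback receives a struct clk_notifier_data and
one of PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE:

	static int example_rate_cb(struct notifier_block *nb,
				   unsigned long event, void *data)
	{
		struct clk_notifier_data *cnd = data;

		if (event == POST_RATE_CHANGE)
			pr_debug("rate: %lu -> %lu\n",
				 cnd->old_rate, cnd->new_rate);

		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_rate_cb,
	};

	/* in probe(): devm_clk_notifier_register(dev, clk, &example_nb); */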
+
#ifdef CONFIG_OF
+static void clk_core_reparent_orphans(void)
+{
+ clk_prepare_lock();
+ clk_core_reparent_orphans_nolock();
+ clk_prepare_unlock();
+}
+
/**
* struct of_clk_provider - Clock provider registration structure
* @link: Entry in global list of clock providers
* @node: Pointer to device tree node of clock provider
* @get: Get clock callback. Returns NULL or a struct clk for the
* given clock specifier
+ * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
+ * struct clk_hw for the given clock specifier
* @data: context pointer to be passed into @get callback
*/
struct of_clk_provider {
@@ -3767,8 +4941,9 @@ struct of_clk_provider {
void *data;
};
+extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
- __used __section(__clk_of_table_end);
+ __used __section("__clk_of_table_end");
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
@@ -3820,6 +4995,8 @@ EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
* @np: Device node pointer associated with clock provider
* @clk_src_get: callback for decoding clock
* @data: context pointer for @clk_src_get callback.
+ *
+ * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
*/
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
@@ -3829,6 +5006,9 @@ int of_clk_add_provider(struct device_node *np,
struct of_clk_provider *cp;
int ret;
+ if (!np)
+ return 0;
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -3842,10 +5022,14 @@ int of_clk_add_provider(struct device_node *np,
mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %pOF\n", np);
+ clk_core_reparent_orphans();
+
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
+ fwnode_dev_initialized(&np->fwnode, true);
+
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
@@ -3864,6 +5048,9 @@ int of_clk_add_hw_provider(struct device_node *np,
struct of_clk_provider *cp;
int ret;
+ if (!np)
+ return 0;
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -3877,10 +5064,14 @@ int of_clk_add_hw_provider(struct device_node *np,
mutex_unlock(&of_clk_mutex);
pr_debug("Added clk_hw provider from %pOF\n", np);
+ clk_core_reparent_orphans();
+
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
+ fwnode_dev_initialized(&np->fwnode, true);
+
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
@@ -3902,8 +5093,8 @@ static struct device_node *get_clk_provider_node(struct device *dev)
np = dev->of_node;
parent_np = dev->parent ? dev->parent->of_node : NULL;
- if (!of_find_property(np, "#clock-cells", NULL))
- if (of_find_property(parent_np, "#clock-cells", NULL))
+ if (!of_property_present(np, "#clock-cells"))
+ if (of_property_present(parent_np, "#clock-cells"))
np = parent_np;
return np;
@@ -3957,10 +5148,14 @@ void of_clk_del_provider(struct device_node *np)
{
struct of_clk_provider *cp;
+ if (!np)
+ return;
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
+ fwnode_dev_initialized(&np->fwnode, false);
of_node_put(cp->node);
kfree(cp);
break;
@@ -3970,31 +5165,79 @@ void of_clk_del_provider(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
-static int devm_clk_provider_match(struct device *dev, void *res, void *data)
-{
- struct device_node **np = res;
-
- if (WARN_ON(!np || !*np))
- return 0;
-
- return *np == data;
-}
-
/**
- * devm_of_clk_del_provider() - Remove clock provider registered using devm
- * @dev: Device to whose lifetime the clock provider was bound
+ * of_parse_clkspec() - Parse a DT clock specifier for a given device node
+ * @np: device node to parse clock specifier from
+ * @index: index of phandle to parse clock out of. If index < 0, @name is used
+ * @name: clock name to find and parse. If name is NULL, the index is used
+ * @out_args: Result of parsing the clock specifier
+ *
+ * Parses a device node's "clocks" and "clock-names" properties to find the
+ * phandle and cells for the index or name that is desired. The resulting clock
+ * specifier is placed into @out_args, or an errno is returned when there's a
+ * parsing error. The @index argument is ignored if @name is non-NULL.
+ *
+ * Example:
+ *
+ * phandle1: clock-controller@1 {
+ * #clock-cells = <2>;
+ * }
+ *
+ * phandle2: clock-controller@2 {
+ * #clock-cells = <1>;
+ * }
+ *
+ * clock-consumer@3 {
+ * clocks = <&phandle1 1 2 &phandle2 3>;
+ * clock-names = "name1", "name2";
+ * }
+ *
+ * To get a device_node for the `clock-controller@2' node, you may call this
+ * function a few different ways:
+ *
+ * of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
+ * of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
+ * of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
+ *
+ * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
+ * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
+ * the "clock-names" property of @np.
*/
-void devm_of_clk_del_provider(struct device *dev)
+static int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name, struct of_phandle_args *out_args)
{
- int ret;
- struct device_node *np = get_clk_provider_node(dev);
+ int ret = -ENOENT;
- ret = devres_release(dev, devm_of_clk_release_provider,
- devm_clk_provider_match, np);
+ /* Walk up the tree of devices looking for a clock property that matches */
+ while (np) {
+ /*
+ * For named clocks, first look up the name in the
+ * "clock-names" property. If it cannot be found, then index
+ * will be an error code and of_parse_phandle_with_args() will
+ * return -EINVAL.
+ */
+ if (name)
+ index = of_property_match_string(np, "clock-names", name);
+ ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
+ index, out_args);
+ if (!ret)
+ break;
+ if (name && index >= 0)
+ break;
- WARN_ON(ret);
+ /*
+ * No matching clock found on this node. If the parent node
+ * has a "clock-ranges" property, then we can try one of its
+ * clocks.
+ */
+ np = np->parent;
+ if (np && !of_property_present(np, "clock-ranges"))
+ break;
+ index = 0;
+ }
+
+ return ret;
}
-EXPORT_SYMBOL(devm_of_clk_del_provider);
static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
@@ -4011,36 +5254,30 @@ __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
return __clk_get_hw(clk);
}
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
- const char *dev_id, const char *con_id)
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
- struct clk *clk = ERR_PTR(-EPROBE_DEFER);
- struct clk_hw *hw;
+ struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
if (!clkspec)
return ERR_PTR(-EINVAL);
- /* Check if we have such a provider in our array */
+ /* Check if the node in clkspec is in a disabled/fail state */
+ if (!of_device_is_available(clkspec->np))
+ return ERR_PTR(-ENOENT);
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
hw = __of_clk_get_hw_from_provider(provider, clkspec);
- clk = __clk_create_clk(hw, dev_id, con_id);
- }
-
- if (!IS_ERR(clk)) {
- if (!__clk_get(clk)) {
- __clk_free_clk(clk);
- clk = ERR_PTR(-ENOENT);
- }
-
- break;
+ if (!IS_ERR(hw))
+ break;
}
}
mutex_unlock(&of_clk_mutex);
- return clk;
+ return hw;
}
/**
@@ -4053,17 +5290,69 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
*/
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
- return __of_clk_get_from_provider(clkspec, NULL, __func__);
+ struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
+
+ return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
+struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
+ const char *con_id)
+{
+ int ret;
+ struct clk_hw *hw;
+ struct of_phandle_args clkspec;
+
+ ret = of_parse_clkspec(np, index, con_id, &clkspec);
+ if (ret)
+ return ERR_PTR(ret);
+
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
+ of_node_put(clkspec.np);
+
+ return hw;
+}
+
+static struct clk *__of_clk_get(struct device_node *np,
+ int index, const char *dev_id,
+ const char *con_id)
+{
+ struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
+
+ return clk_hw_create_clk(NULL, hw, dev_id, con_id);
+}
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+ return __of_clk_get(np, index, np->full_name, NULL);
+}
+EXPORT_SYMBOL(of_clk_get);
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the "clocks" and "clock-names" properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ return __of_clk_get(np, 0, np->full_name, name);
+}
+EXPORT_SYMBOL(of_clk_get_by_name);
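A consumer-side sketch matching the DT example in of_parse_clkspec() above,
with hypothetical names:

	/* clocks = <&phandle1 1 2 &phandle2 3>; clock-names = "name1", "name2"; */
	struct clk *clk = of_clk_get_by_name(np, "name2");

	if (IS_ERR(clk))
		clk = of_clk_get(np, 1);	/* same clock, by index */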
+
/**
* of_clk_get_parent_count() - Count the number of clocks a device node has
* @np: device node to count
*
* Returns: The number of clocks that are possible parents of this node
*/
-unsigned int of_clk_get_parent_count(struct device_node *np)
+unsigned int of_clk_get_parent_count(const struct device_node *np)
{
int count;
@@ -4075,12 +5364,11 @@ unsigned int of_clk_get_parent_count(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
-const char *of_clk_get_parent_name(struct device_node *np, int index)
+const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
struct of_phandle_args clkspec;
- struct property *prop;
const char *clk_name;
- const __be32 *vp;
+ bool found = false;
u32 pv;
int rc;
int count;
@@ -4097,16 +5385,19 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
/* if there is an indices property, use it to transfer the index
* specified into an array offset for the clock-output-names property.
*/
- of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
+ of_property_for_each_u32(clkspec.np, "clock-indices", pv) {
if (index == pv) {
index = count;
+ found = true;
break;
}
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (prop && !vp)
+ if (of_property_present(clkspec.np, "clock-indices") && !found) {
+ of_node_put(clkspec.np);
return NULL;
+ }
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
@@ -4215,17 +5506,15 @@ static int parent_ready(struct device_node *np)
*
* Return: error code or zero on success
*/
-int of_clk_detect_critical(struct device_node *np,
- int index, unsigned long *flags)
+int of_clk_detect_critical(struct device_node *np, int index,
+ unsigned long *flags)
{
- struct property *prop;
- const __be32 *cur;
uint32_t idx;
if (!np || !flags)
return -EINVAL;
- of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
+ of_property_for_each_u32(np, "clock-critical", idx)
if (index == idx)
*flags |= CLK_IS_CRITICAL;