Diffstat (limited to 'drivers/clk/clk.c')
 drivers/clk/clk.c | 1008 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 691 insertions(+), 317 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 8de6a22498e7..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
#include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
#include "clk.h"
@@ -33,11 +36,18 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
-static struct hlist_head *all_lists[] = {
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
+static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
@@ -59,6 +69,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -82,6 +93,7 @@ struct clk_core {
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node hashtable_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
@@ -108,17 +120,10 @@ struct clk {
/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
- int ret;
-
if (!core->rpm_enabled)
return 0;
- ret = pm_runtime_get_sync(core->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(core->dev);
- return ret;
- }
- return 0;
+ return pm_runtime_resume_and_get(core->dev);
}
static void clk_pm_runtime_put(struct clk_core *core)
@@ -129,6 +134,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
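A provider-side sketch of how a clk_core ends up on clk_rpm_list (not part of this patch; driver name and rate are made up): clk_pm_runtime_init() samples pm_runtime_enabled() once at registration, so the provider has to enable runtime PM on its device before registering clks.

#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe: runtime PM is enabled first, so the clk registered
 * below gets rpm_enabled = true and is added to clk_rpm_list.
 */
static int foo_clk_probe(struct platform_device *pdev)
{
	struct clk_hw *hw;
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	hw = devm_clk_hw_register_fixed_rate(&pdev->dev, "foo_clk", NULL, 0,
					     100000000);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get, hw);
}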
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -251,6 +339,17 @@ static bool clk_core_is_enabled(struct clk_core *core)
}
}
+ /*
+ * This could be called with the enable lock held, or from atomic
+ * context. If the parent isn't enabled already, we can't do
+ * anything here. We can also assume this clock isn't enabled.
+ */
+ if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
+ if (!clk_core_is_enabled(core->parent)) {
+ ret = false;
+ goto done;
+ }
+
ret = core->ops->is_enabled(core->hw);
done:
if (core->rpm_enabled)
@@ -273,6 +372,18 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
+struct device *clk_hw_get_dev(const struct clk_hw *hw)
+{
+ return hw->core->dev;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_dev);
+
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
+{
+ return hw->core->of_node;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
+
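The two accessors above let clk_ops code reach the device and OF node a clk was registered with; a small hypothetical example of the kind of use they enable:

/* Hypothetical callback: log against the provider device without having
 * to stash a pointer in driver-private data. dev may be NULL for clks
 * registered without a device, so check before dereferencing.
 */
static int foo_clk_prepare(struct clk_hw *hw)
{
	struct device *dev = clk_hw_get_dev(hw);

	if (dev)
		dev_dbg(dev, "preparing %s\n", clk_hw_get_name(hw));

	return 0;
}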
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
@@ -291,45 +402,20 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
-static struct clk_core *__clk_lookup_subtree(const char *name,
- struct clk_core *core)
-{
- struct clk_core *child;
- struct clk_core *ret;
-
- if (!strcmp(core->name, name))
- return core;
-
- hlist_for_each_entry(child, &core->children, child_node) {
- ret = __clk_lookup_subtree(name, child);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static struct clk_core *clk_core_lookup(const char *name)
{
- struct clk_core *root_clk;
- struct clk_core *ret;
+ struct clk_core *core;
+ u32 hash;
if (!name)
return NULL;
- /* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ hash = full_name_hash(NULL, name, strlen(name));
- /* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ /* search the hashtable */
+ hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+ if (!strcmp(core->name, name))
+ return core;
return NULL;
}
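The matching insert side (hash_add() over the same full_name_hash() key) is added to __clk_core_init() further down. As a standalone illustration of the pattern, with made-up names:

#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/stringhash.h>

#define DEMO_HASH_BITS 9
static DEFINE_HASHTABLE(demo_table, DEMO_HASH_BITS);

struct demo_entry {
	const char *name;
	struct hlist_node node;
};

static void demo_add(struct demo_entry *e)
{
	hash_add(demo_table, &e->node,
		 full_name_hash(NULL, e->name, strlen(e->name)));
}

static struct demo_entry *demo_lookup(const char *name)
{
	struct demo_entry *e;
	u32 hash = full_name_hash(NULL, name, strlen(name));

	/* Buckets can collide, so names must still be compared. */
	hash_for_each_possible(demo_table, e, node, hash)
		if (!strcmp(e->name, name))
			return e;

	return NULL;
}

This turns the old O(n) recursive walk over both clk trees into an O(1) average-case bucket scan.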
@@ -414,6 +500,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
if (IS_ERR(hw))
return ERR_CAST(hw);
+ if (!hw)
+ return NULL;
+
return hw->core;
}
@@ -513,12 +602,6 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
-bool clk_hw_rate_is_protected(const struct clk_hw *hw)
-{
- return clk_core_rate_is_protected(hw->core);
-}
-EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
-
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
@@ -543,6 +626,94 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
return now <= rate && now > best;
}
+static void clk_core_init_rate_req(struct clk_core * const core,
+ struct clk_rate_request *req,
+ unsigned long rate);
+
+static int clk_core_round_rate_nolock(struct clk_core *core,
+ struct clk_rate_request *req);
+
+static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
+{
+ struct clk_core *tmp;
+ unsigned int i;
+
+ /* Optimize for the case where the parent is already the parent. */
+ if (core->parent == parent)
+ return true;
+
+ for (i = 0; i < core->num_parents; i++) {
+ tmp = clk_core_get_parent_by_index(core, i);
+ if (!tmp)
+ continue;
+
+ if (tmp == parent)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+clk_core_forward_rate_req(struct clk_core *core,
+ const struct clk_rate_request *old_req,
+ struct clk_core *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!clk_core_has_parent(core, parent)))
+ return;
+
+ clk_core_init_rate_req(parent, req, parent_rate);
+
+ if (req->min_rate < old_req->min_rate)
+ req->min_rate = old_req->min_rate;
+
+ if (req->max_rate > old_req->max_rate)
+ req->max_rate = old_req->max_rate;
+}
+
+static int
+clk_core_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_core *core = hw->core;
+ struct clk_core *parent = core->parent;
+ unsigned long best;
+ int ret;
+
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ struct clk_rate_request parent_req;
+
+ if (!parent) {
+ req->rate = 0;
+ return 0;
+ }
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req,
+ req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
+ if (ret)
+ return ret;
+
+ trace_clk_rate_request_done(&parent_req);
+
+ best = parent_req.rate;
+ } else if (parent) {
+ best = clk_core_get_rate_nolock(parent);
+ } else {
+ best = clk_core_get_rate_nolock(core);
+ }
+
+ req->best_parent_rate = best;
+ req->rate = best;
+
+ return 0;
+}
+
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags)
@@ -550,56 +721,49 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
int i, num_parents, ret;
unsigned long best = 0;
- struct clk_rate_request parent_req = *req;
/* if NO_REPARENT flag set, pass through to current parent */
- if (core->flags & CLK_SET_RATE_NO_REPARENT) {
- parent = core->parent;
- if (core->flags & CLK_SET_RATE_PARENT) {
- ret = __clk_determine_rate(parent ? parent->hw : NULL,
- &parent_req);
- if (ret)
- return ret;
-
- best = parent_req.rate;
- } else if (parent) {
- best = clk_core_get_rate_nolock(parent);
- } else {
- best = clk_core_get_rate_nolock(core);
- }
-
- goto out;
- }
+ if (core->flags & CLK_SET_RATE_NO_REPARENT)
+ return clk_core_determine_rate_no_reparent(hw, req);
/* find the parent that can provide the fastest rate <= rate */
num_parents = core->num_parents;
for (i = 0; i < num_parents; i++) {
+ unsigned long parent_rate;
+
parent = clk_core_get_parent_by_index(core, i);
if (!parent)
continue;
if (core->flags & CLK_SET_RATE_PARENT) {
- parent_req = *req;
- ret = __clk_determine_rate(parent->hw, &parent_req);
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
+
+ trace_clk_rate_request_done(&parent_req);
+
+ parent_rate = parent_req.rate;
} else {
- parent_req.rate = clk_core_get_rate_nolock(parent);
+ parent_rate = clk_core_get_rate_nolock(parent);
}
- if (mux_is_better_rate(req->rate, parent_req.rate,
+ if (mux_is_better_rate(req->rate, parent_rate,
best, flags)) {
best_parent = parent;
- best = parent_req.rate;
+ best = parent_rate;
}
}
if (!best_parent)
return -EINVAL;
-out:
- if (best_parent)
- req->best_parent_hw = best_parent->hw;
+ req->best_parent_hw = best_parent->hw;
req->best_parent_rate = best;
req->rate = best;
@@ -632,6 +796,40 @@ static void clk_core_get_boundaries(struct clk_core *core,
*max_rate = min(*max_rate, clk_user->max_rate);
}
+/*
+ * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
+ * @hw: the hw clk we want to get the range from
+ * @min_rate: pointer to the variable that will hold the minimum
+ * @max_rate: pointer to the variable that will hold the maximum
+ *
+ * Fills the @min_rate and @max_rate variables with the minimum and
+ * maximum rates that the clock can reach.
+ */
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate)
+{
+ clk_core_get_boundaries(hw->core, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
+
+static bool clk_core_check_boundaries(struct clk_core *core,
+ unsigned long min_rate,
+ unsigned long max_rate)
+{
+ struct clk *user;
+
+ lockdep_assert_held(&prepare_lock);
+
+ if (min_rate > core->max_rate || max_rate < core->min_rate)
+ return false;
+
+ hlist_for_each_entry(user, &core->clks, clks_node)
+ if (min_rate > user->max_rate || max_rate < user->min_rate)
+ return false;
+
+ return true;
+}
+
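A sketch of the new getter in use (hypothetical helper; clk_core_get_boundaries() expects the prepare_lock, which the framework holds when it calls into determine_rate):

/* Hypothetical: clamp a computed rate to the clk's current boundaries,
 * e.g. from inside a determine_rate implementation.
 */
static unsigned long foo_clamp_to_range(struct clk_hw *hw, unsigned long rate)
{
	unsigned long min, max;

	clk_hw_get_rate_range(hw, &min, &max);

	return clamp(rate, min, max);
}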
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate)
{
@@ -665,6 +863,25 @@ int __clk_mux_determine_rate_closest(struct clk_hw *hw,
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
+/*
+ * clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
+ * @hw: mux type clk to determine rate on
+ * @req: rate request, also used to return preferred frequency
+ *
+ * Helper for finding best parent rate to provide a given frequency.
+ * This can be used directly as a determine_rate callback (e.g. for a
+ * mux), or from a more complex clock that may combine a mux with other
+ * operations.
+ *
+ * Returns: 0 on success, a negative errno on error
+ */
+int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_core_determine_rate_no_reparent(hw, req);
+}
+EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);
+
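As the kernel-doc says, the helper can be plugged in as the callback itself; a hypothetical mux ops table (the foo_mux_* callbacks are assumed to exist elsewhere in the driver):

static u8 foo_mux_get_parent(struct clk_hw *hw);
static int foo_mux_set_parent(struct clk_hw *hw, u8 index);
static unsigned long foo_mux_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate);

/* A mux that must never be reparented behind the consumer's back. */
static const struct clk_ops foo_mux_ops = {
	.determine_rate	= clk_hw_determine_rate_no_reparent,
	.get_parent	= foo_mux_get_parent,
	.set_parent	= foo_mux_set_parent,
	.recalc_rate	= foo_mux_recalc_rate,
};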
/*** clk api ***/
static void clk_core_rate_unprotect(struct clk_core *core)
@@ -801,6 +1018,25 @@ int clk_rate_exclusive_get(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
+static void devm_clk_rate_exclusive_put(void *data)
+{
+ struct clk *clk = data;
+
+ clk_rate_exclusive_put(clk);
+}
+
+int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_rate_exclusive_get(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_clk_rate_exclusive_put, clk);
+}
+EXPORT_SYMBOL_GPL(devm_clk_rate_exclusive_get);
+
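A minimal consumer sketch (hypothetical probe and rates) of the new devm wrapper; the exclusivity reference is dropped automatically on driver detach:

static int foo_probe(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_enabled(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = devm_clk_rate_exclusive_get(dev, clk);
	if (ret)
		return ret;

	/* The exclusive holder itself may still change the rate. */
	return clk_set_rate(clk, 200000000);
}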
static void clk_core_unprepare(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
@@ -829,10 +1065,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
@@ -960,12 +1195,12 @@ static void clk_core_disable(struct clk_core *core)
if (--core->enable_count > 0)
return;
- trace_clk_disable_rcuidle(core);
+ trace_clk_disable(core);
if (core->ops->disable)
core->ops->disable(core->hw);
- trace_clk_disable_complete_rcuidle(core);
+ trace_clk_disable_complete(core);
clk_core_disable(core->parent);
}
@@ -1019,12 +1254,12 @@ static int clk_core_enable(struct clk_core *core)
if (ret)
return ret;
- trace_clk_enable_rcuidle(core);
+ trace_clk_enable(core);
if (core->ops->enable)
ret = core->ops->enable(core->hw);
- trace_clk_enable_complete_rcuidle(core);
+ trace_clk_enable_complete(core);
if (ret) {
clk_core_disable(core->parent);
@@ -1222,9 +1457,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1233,8 +1465,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1250,9 +1480,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1277,8 +1504,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1294,12 +1519,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
+ pr_info("clk: Disabling unused clocks\n");
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1316,6 +1551,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -1331,6 +1568,20 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
return 0;
/*
+ * Some clock providers hand-craft their clk_rate_requests and
+ * might not fill min_rate and max_rate.
+ *
+ * If that's the case, clamping the rate is equivalent to setting
+ * the rate to 0, which is bad. Skip the clamping but complain so
+ * that it gets fixed, hopefully.
+ */
+ if (!req->min_rate && !req->max_rate)
+ pr_warn("%s: %s: clk_rate_request has uninitialized min and max rate\n",
+ __func__, core->name);
+ else
+ req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+
+ /*
* At this point, core protection will be disabled
* - if the provider is not protected at all
* - if the calling consumer is the only one which has exclusivity
@@ -1355,13 +1606,24 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
}
static void clk_core_init_rate_req(struct clk_core * const core,
- struct clk_rate_request *req)
+ struct clk_rate_request *req,
+ unsigned long rate)
{
struct clk_core *parent;
- if (WARN_ON(!core || !req))
+ if (WARN_ON(!req))
+ return;
+
+ memset(req, 0, sizeof(*req));
+ req->max_rate = ULONG_MAX;
+
+ if (!core)
return;
+ req->core = core;
+ req->rate = rate;
+ clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
+
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
@@ -1372,6 +1634,52 @@ static void clk_core_init_rate_req(struct clk_core * const core,
}
}
+/**
+ * clk_hw_init_rate_request - Initializes a clk_rate_request
+ * @hw: the clk for which we want to submit a rate request
+ * @req: the clk_rate_request structure we want to initialise
+ * @rate: the rate which is to be requested
+ *
+ * Initializes a clk_rate_request structure to submit to
+ * __clk_determine_rate() or similar functions.
+ */
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate)
+{
+ if (WARN_ON(!hw || !req))
+ return;
+
+ clk_core_init_rate_req(hw->core, req, rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
+
+/**
+ * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
+ * @hw: the original clock that got the rate request
+ * @old_req: the original clk_rate_request structure we want to forward
+ * @parent: the clk we want to forward @old_req to
+ * @req: the clk_rate_request structure we want to initialise
+ * @parent_rate: The rate which is to be requested to @parent
+ *
+ * Initializes a clk_rate_request structure to submit to a clock parent
+ * in __clk_determine_rate() or similar functions.
+ */
+void clk_hw_forward_rate_request(const struct clk_hw *hw,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!hw || !old_req || !parent || !req))
+ return;
+
+ clk_core_forward_rate_req(hw->core, old_req,
+ parent->core, req,
+ parent_rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
+
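A hedged sketch of a driver-side determine_rate built on the new helper, for a hypothetical fixed divide-by-2 clock that asks its parent to change rate:

static int foo_div2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	struct clk_rate_request parent_req;
	int ret;

	if (!parent)
		return -EINVAL;

	/* Ask upstream for twice the requested rate... */
	clk_hw_forward_rate_request(hw, req, parent, &parent_req,
				    req->rate * 2);

	ret = __clk_determine_rate(parent, &parent_req);
	if (ret)
		return ret;

	/* ...and report what we can deliver from the parent's answer. */
	req->best_parent_rate = parent_req.rate;
	req->rate = parent_req.rate / 2;

	return 0;
}

Compared with copying *req wholesale (the old pattern this patch removes), the forward helper re-derives min_rate/max_rate from the parent's own boundaries and only then intersects them with the original request.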
static bool clk_core_can_round(struct clk_core * const core)
{
return core->ops->determine_rate || core->ops->round_rate;
@@ -1380,6 +1688,8 @@ static bool clk_core_can_round(struct clk_core * const core)
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
+ int ret;
+
lockdep_assert_held(&prepare_lock);
if (!core) {
@@ -1387,12 +1697,27 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
return 0;
}
- clk_core_init_rate_req(core, req);
-
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
- else if (core->flags & CLK_SET_RATE_PARENT)
- return clk_core_round_rate_nolock(core->parent, req);
+
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
+
+ trace_clk_rate_request_start(&parent_req);
+
+ ret = clk_core_round_rate_nolock(core->parent, &parent_req);
+ if (ret)
+ return ret;
+
+ trace_clk_rate_request_done(&parent_req);
+
+ req->best_parent_rate = parent_req.rate;
+ req->rate = parent_req.rate;
+
+ return 0;
+ }
req->rate = core->rate;
return 0;
@@ -1436,13 +1761,16 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
int ret;
struct clk_rate_request req;
- clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(hw->core, &req, rate);
+
+ trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
return 0;
+ trace_clk_rate_request_done(&req);
+
return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);
@@ -1469,11 +1797,14 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
- clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(clk->core, &req, rate);
+
+ trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(clk->core, &req);
+ trace_clk_rate_request_done(&req);
+
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
@@ -1599,6 +1930,7 @@ static unsigned long clk_recalc(struct clk_core *core,
/**
* __clk_recalc_rates
* @core: first clk in the subtree
+ * @update_req: Whether req_rate should be updated with the new rate
* @msg: notification type (see include/linux/clk.h)
*
* Walks the subtree of clks starting with clk and recalculates rates as it
@@ -1608,7 +1940,8 @@ static unsigned long clk_recalc(struct clk_core *core,
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
*/
-static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *core, bool update_req,
+ unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
@@ -1622,6 +1955,8 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
parent_rate = core->parent->rate;
core->rate = clk_recalc(core, parent_rate);
+ if (update_req)
+ core->req_rate = core->rate;
/*
* ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
@@ -1631,13 +1966,13 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
__clk_notify(core, msg, old_rate, core->rate);
hlist_for_each_entry(child, &core->children, child_node)
- __clk_recalc_rates(child, msg);
+ __clk_recalc_rates(child, update_req, msg);
}
static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
- __clk_recalc_rates(core, 0);
+ __clk_recalc_rates(core, false, 0);
return clk_core_get_rate_nolock(core);
}
@@ -1647,8 +1982,9 @@ static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
+ * is set, which means a recalc_rate will be issued. Can be called regardless
+ * of whether the clock is enabled. If clk is NULL, or if an error occurred,
+ * then returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
@@ -1852,6 +2188,7 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
flags = clk_enable_lock();
clk_reparent(core, old_parent);
clk_enable_unlock(flags);
+
__clk_set_parent_after(core, old_parent, parent);
return ret;
@@ -1940,7 +2277,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
- long ret;
+ int ret;
/* sanity */
if (IS_ERR_OR_NULL(core))
@@ -1957,16 +2294,16 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
if (clk_core_can_round(core)) {
struct clk_rate_request req;
- req.rate = rate;
- req.min_rate = min_rate;
- req.max_rate = max_rate;
+ clk_core_init_rate_req(core, &req, rate);
- clk_core_init_rate_req(core, &req);
+ trace_clk_rate_request_start(&req);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
return NULL;
+ trace_clk_rate_request_done(&req);
+
best_parent_rate = req.best_parent_rate;
new_rate = req.rate;
parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
@@ -2160,11 +2497,14 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
if (cnt < 0)
return cnt;
- clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
- req.rate = req_rate;
+ clk_core_init_rate_req(core, &req, req_rate);
+
+ trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(core, &req);
+ trace_clk_rate_request_done(&req);
+
/* restore the protection */
clk_core_rate_restore_protect(core, cnt);
@@ -2176,7 +2516,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
{
struct clk_core *top, *fail_clk;
unsigned long rate;
- int ret = 0;
+ int ret;
if (!core)
return 0;
@@ -2312,19 +2652,15 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+static int clk_set_rate_range_nolock(struct clk *clk,
+ unsigned long min,
+ unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
+ lockdep_assert_held(&prepare_lock);
+
if (!clk)
return 0;
@@ -2337,8 +2673,6 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
return -EINVAL;
}
- clk_prepare_lock();
-
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
@@ -2348,37 +2682,66 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
clk->min_rate = min;
clk->max_rate = max;
- rate = clk_core_get_rate_nolock(clk->core);
- if (rate < min || rate > max) {
- /*
- * FIXME:
- * We are in bit of trouble here, current rate is outside the
- * the requested range. We are going try to request appropriate
- * range boundary but there is a catch. It may fail for the
- * usual reason (clock broken, clock protected, etc) but also
- * because:
- * - round_rate() was not favorable and fell on the wrong
- * side of the boundary
- * - the determine_rate() callback does not really check for
- * this corner case when determining the rate
- */
+ if (!clk_core_check_boundaries(clk->core, min, max)) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (rate < min)
- rate = min;
- else
- rate = max;
+ rate = clk->core->req_rate;
+ if (clk->core->flags & CLK_GET_RATE_NOCACHE)
+ rate = clk_core_get_rate_recalc(clk->core);
- ret = clk_core_set_rate_nolock(clk->core, rate);
- if (ret) {
- /* rollback the changes */
- clk->min_rate = old_min;
- clk->max_rate = old_max;
- }
+ /*
+ * Since the boundaries have been changed, let's give the
+ * opportunity to the provider to adjust the clock rate based on
+ * the new boundaries.
+ *
+ * We also need to handle the case where the clock is currently
+ * outside of the boundaries. Clamping the last requested rate
+ * to the current minimum and maximum will also handle this.
+ *
+ * FIXME:
+ * There is a catch. It may fail for the usual reason (clock
+ * broken, clock protected, etc) but also because:
+ * - round_rate() was not favorable and fell on the wrong
+ * side of the boundary
+ * - the determine_rate() callback does not really check for
+ * this corner case when determining the rate
+ */
+ rate = clamp(rate, min, max);
+ ret = clk_core_set_rate_nolock(clk->core, rate);
+ if (ret) {
+ /* rollback the changes */
+ clk->min_rate = old_min;
+ clk->max_rate = old_max;
}
+out:
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
+ return ret;
+}
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Return: 0 for success or negative errno on failure.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+ int ret;
+
+ if (!clk)
+ return 0;
+
+ clk_prepare_lock();
+
+ ret = clk_set_rate_range_nolock(clk, min, max);
+
clk_prepare_unlock();
return ret;
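Consumer usage is unchanged; a tiny hypothetical example (rates made up, HZ_PER_MHZ from linux/units.h):

#include <linux/clk.h>
#include <linux/units.h>

/* Pin the clk into a 100-400 MHz window; per the rework above, the core
 * now clamps the last requested rate to the new boundaries and re-applies
 * it, instead of only reacting when the current rate falls outside.
 */
static int foo_limit_rate(struct clk *clk)
{
	return clk_set_rate_range(clk, 100 * HZ_PER_MHZ, 400 * HZ_PER_MHZ);
}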
@@ -2458,7 +2821,7 @@ static void clk_core_reparent(struct clk_core *core,
{
clk_reparent(core, new_parent);
__clk_recalc_accuracies(core);
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
}
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
@@ -2479,27 +2842,13 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
- struct clk_core *core, *parent_core;
- int i;
-
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
return true;
- core = clk->core;
- parent_core = parent->core;
-
- /* Optimize for the case where the parent is already the parent. */
- if (core->parent == parent_core)
- return true;
-
- for (i = 0; i < core->num_parents; i++)
- if (!strcmp(core->parents[i].name, parent_core->name))
- return true;
-
- return false;
+ return clk_core_has_parent(clk->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2556,9 +2905,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
/* propagate rate and accuracy recalculation accordingly */
if (ret) {
- __clk_recalc_rates(core, ABORT_RATE_CHANGE);
+ __clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
} else {
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
__clk_recalc_accuracies(core);
}
@@ -2937,28 +3286,41 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
int phase;
+ struct clk *clk_user;
+ int multi_node = 0;
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
+ seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
level * 3 + 1, "",
- 30 - level * 3, c->name,
+ 35 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate_recalc(c),
clk_core_get_accuracy_recalc(c));
phase = clk_core_get_phase(c);
if (phase >= 0)
- seq_printf(s, "%5d", phase);
+ seq_printf(s, "%-5d", phase);
else
seq_puts(s, "-----");
- seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
+ seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
if (c->ops->is_enabled)
- seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
+ seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
else if (!c->ops->enable)
- seq_printf(s, " %9c\n", 'Y');
+ seq_printf(s, " %5c ", 'Y');
else
- seq_printf(s, " %9c\n", '?');
+ seq_printf(s, " %5c ", '?');
+
+ hlist_for_each_entry(clk_user, &c->clks, clks_node) {
+ seq_printf(s, "%*s%-*s %-25s\n",
+ level * 3 + 2 + 105 * multi_node, "",
+ 30,
+ clk_user->dev_id ? clk_user->dev_id : "deviceless",
+ clk_user->con_id ? clk_user->con_id : "no_connection_id");
+
+ multi_node = 1;
+ }
+
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2966,9 +3328,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -2977,11 +3337,16 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
+ int ret;
+
+ seq_puts(s, " enable prepare protect duty hardware connection\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
- seq_puts(s, " enable prepare protect duty hardware\n");
- seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
- seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -2990,6 +3355,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3036,9 +3402,15 @@ static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3051,6 +3423,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3078,6 +3451,21 @@ static int clk_rate_set(void *data, u64 val)
#define clk_rate_mode 0644
+static int clk_phase_set(void *data, u64 val)
+{
+ struct clk_core *core = data;
+ int degrees = do_div(val, 360);
+ int ret;
+
+ clk_prepare_lock();
+ ret = clk_core_set_phase_nolock(core, degrees);
+ clk_prepare_unlock();
+
+ return ret;
+}
+
+#define clk_phase_mode 0644
+
static int clk_prepare_enable_set(void *data, u64 val)
{
struct clk_core *core = data;
@@ -3105,6 +3493,9 @@ DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
#else
#define clk_rate_set NULL
#define clk_rate_mode 0444
+
+#define clk_phase_set NULL
+#define clk_phase_mode 0444
#endif
static int clk_rate_get(void *data, u64 *val)
@@ -3120,6 +3511,16 @@ static int clk_rate_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
+static int clk_phase_get(void *data, u64 *val)
+{
+ struct clk_core *core = data;
+
+ *val = core->phase;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_phase_fops, clk_phase_get, clk_phase_set, "%llu\n");
+
static const struct {
unsigned long flag;
const char *name;
@@ -3165,6 +3566,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
unsigned int i, char terminator)
{
struct clk_core *parent;
+ const char *name = NULL;
/*
* Go through the following options to fetch a parent's name.
@@ -3179,18 +3581,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
* registered (yet).
*/
parent = clk_core_get_parent_by_index(core, i);
- if (parent)
+ if (parent) {
seq_puts(s, parent->name);
- else if (core->parents[i].name)
+ } else if (core->parents[i].name) {
seq_puts(s, core->parents[i].name);
- else if (core->parents[i].fw_name)
+ } else if (core->parents[i].fw_name) {
seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
- else if (core->parents[i].index >= 0)
- seq_puts(s,
- of_clk_get_parent_name(core->of_node,
- core->parents[i].index));
- else
- seq_puts(s, "(missing)");
+ } else {
+ if (core->parents[i].index >= 0)
+ name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
+ if (!name)
+ name = "(missing)";
+
+ seq_puts(s, name);
+ }
seq_putc(s, terminator);
}
@@ -3310,7 +3714,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
- debugfs_create_u32("clk_phase", 0444, root, &core->phase);
+ debugfs_create_file("clk_phase", clk_phase_mode, root, core,
+ &clk_phase_fops);
debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
@@ -3446,7 +3851,7 @@ static void clk_core_reparent_orphans_nolock(void)
/*
* We need to use __clk_set_parent_before() and _after() to
- * to properly migrate any prepare/enable count of the orphan
+ * properly migrate any prepare/enable count of the orphan
* clock. This is important for CLK_IS_CRITICAL clocks, which
* are enabled during init but might not have a parent yet.
*/
@@ -3455,7 +3860,20 @@ static void clk_core_reparent_orphans_nolock(void)
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
- __clk_recalc_rates(orphan, 0);
+ __clk_recalc_rates(orphan, true, 0);
+
+ /*
+ * __clk_init_parent() will set the initial req_rate to
+ * 0 if the clock doesn't have clk_ops::recalc_rate and
+ * is an orphan when it's registered.
+ *
+ * 'req_rate' is used by clk_set_rate_range() and
+ * clk_put() to trigger a clk_set_rate() call whenever
+ * the boundaries are modified. Let's make sure
+ * 'req_rate' is set to something non-zero so that
+ * clk_set_rate_range() doesn't drop the frequency.
+ */
+ orphan->req_rate = orphan->rate;
}
}
}
@@ -3513,6 +3931,13 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
+ if (core->ops->set_parent && !core->ops->determine_rate) {
+ pr_err("%s: %s must implement .set_parent & .determine_rate\n",
+ __func__, core->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (core->num_parents > 1 && !core->ops->get_parent) {
pr_err("%s: %s must implement .get_parent as it has multi parents\n",
__func__, core->name);
@@ -3570,6 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
+ hash_add(clk_hashtable, &core->hashtable_node,
+ full_name_hash(NULL, core->name, strlen(core->name)));
/*
* Set clk's accuracy. The preferred method is to use
@@ -3642,13 +4069,11 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
if (ret) {
+ hash_del(&core->hashtable_node);
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}
@@ -3773,8 +4198,9 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
{
struct device *dev = hw->core->dev;
+ const char *name = dev ? dev_name(dev) : NULL;
- return clk_hw_create_clk(dev, hw, dev_name(dev), con_id);
+ return clk_hw_create_clk(dev, hw, name, con_id);
}
EXPORT_SYMBOL(clk_hw_get_clk);
@@ -3872,6 +4298,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -3892,6 +4334,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -3904,9 +4348,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -3946,13 +4389,18 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
+ if (dev) {
+ dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n",
+ init->name, hw);
+ } else {
+ pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n",
+ np, ERR_PTR(ret), init->name, hw);
+ }
return ERR_PTR(ret);
}
@@ -4031,18 +4479,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@@ -4069,17 +4505,24 @@ static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
return -ENXIO;
}
+static int clk_nodrv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return -ENXIO;
+}
+
static const struct clk_ops clk_nodrv_ops = {
.enable = clk_nodrv_prepare_enable,
.disable = clk_nodrv_disable_unprepare,
.prepare = clk_nodrv_prepare_enable,
.unprepare = clk_nodrv_disable_unprepare,
+ .determine_rate = clk_nodrv_determine_rate,
.set_rate = clk_nodrv_set_rate,
.set_parent = clk_nodrv_set_parent,
};
static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
- struct clk_core *target)
+ const struct clk_core *target)
{
int i;
struct clk_core *child;
@@ -4095,7 +4538,7 @@ static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
- struct hlist_head **lists;
+ const struct hlist_head **lists;
struct clk_core *root;
lockdep_assert_held(&prepare_lock);
@@ -4126,7 +4569,8 @@ void clk_unregister(struct clk *clk)
if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- goto unlock;
+ clk_prepare_unlock();
+ return;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -4151,6 +4595,7 @@ void clk_unregister(struct clk *clk)
clk_core_evict_parent_cache(clk->core);
+ hash_del(&clk->core->hashtable_node);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
@@ -4160,11 +4605,10 @@ void clk_unregister(struct clk *clk)
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
+ clk_prepare_unlock();
kref_put(&clk->core->ref, __clk_release);
free_clk(clk);
-unlock:
- clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -4249,54 +4693,6 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
-static int devm_clk_match(struct device *dev, void *res, void *data)
-{
- struct clk *c = res;
- if (WARN_ON(!c))
- return 0;
- return c == data;
-}
-
-static int devm_clk_hw_match(struct device *dev, void *res, void *data)
-{
- struct clk_hw *hw = res;
-
- if (WARN_ON(!hw))
- return 0;
- return hw == data;
-}
-
-/**
- * devm_clk_unregister - resource managed clk_unregister()
- * @dev: device that is unregistering the clock data
- * @clk: clock to unregister
- *
- * Deallocate a clock allocated with devm_clk_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_unregister(struct device *dev, struct clk *clk)
-{
- WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
-}
-EXPORT_SYMBOL_GPL(devm_clk_unregister);
-
-/**
- * devm_clk_hw_unregister - resource managed clk_hw_unregister()
- * @dev: device that is unregistering the hardware-specific clock data
- * @hw: link to hardware-specific clock data
- *
- * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
-{
- WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
- hw));
-}
-EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
-
static void devm_clk_release(struct device *dev, void *res)
{
clk_put(*(struct clk **)res);
@@ -4365,18 +4761,17 @@ void __clk_put(struct clk *clk)
clk->exclusive_count = 0;
}
- hlist_del(&clk->clks_node);
- if (clk->min_rate > clk->core->req_rate ||
- clk->max_rate < clk->core->req_rate)
- clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+ clk_core_unlink_consumer(clk);
- owner = clk->core->owner;
- kref_put(&clk->core->ref, __clk_release);
+ /* If we had any boundaries on that clock, let's drop them. */
+ if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
+ clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
clk_prepare_unlock();
+ owner = clk->core->owner;
+ kref_put(&clk->core->ref, __clk_release);
module_put(owner);
-
free_clk(clk);
}
@@ -4510,6 +4905,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
if (!ret) {
devres->clk = clk;
devres->nb = nb;
+ devres_add(dev, devres);
} else {
devres_free(devres);
}
@@ -4697,8 +5093,8 @@ static struct device_node *get_clk_provider_node(struct device *dev)
np = dev->of_node;
parent_np = dev->parent ? dev->parent->of_node : NULL;
- if (!of_find_property(np, "#clock-cells", NULL))
- if (of_find_property(parent_np, "#clock-cells", NULL))
+ if (!of_property_present(np, "#clock-cells"))
+ if (of_property_present(parent_np, "#clock-cells"))
np = parent_np;
return np;
@@ -4769,32 +5165,6 @@ void of_clk_del_provider(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
-static int devm_clk_provider_match(struct device *dev, void *res, void *data)
-{
- struct device_node **np = res;
-
- if (WARN_ON(!np || !*np))
- return 0;
-
- return *np == data;
-}
-
-/**
- * devm_of_clk_del_provider() - Remove clock provider registered using devm
- * @dev: Device to whose lifetime the clock provider was bound
- */
-void devm_of_clk_del_provider(struct device *dev)
-{
- int ret;
- struct device_node *np = get_clk_provider_node(dev);
-
- ret = devres_release(dev, devm_of_clk_release_provider,
- devm_clk_provider_match, np);
-
- WARN_ON(ret);
-}
-EXPORT_SYMBOL(devm_of_clk_del_provider);
-
/**
* of_parse_clkspec() - Parse a DT clock specifier for a given device node
* @np: device node to parse clock specifier from
@@ -4861,7 +5231,7 @@ static int of_parse_clkspec(const struct device_node *np, int index,
* clocks.
*/
np = np->parent;
- if (np && !of_get_property(np, "clock-ranges", NULL))
+ if (np && !of_property_present(np, "clock-ranges"))
break;
index = 0;
}
@@ -4893,6 +5263,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
+ /* Check if node in clkspec is in disabled/fail state */
+ if (!of_device_is_available(clkspec->np))
+ return ERR_PTR(-ENOENT);
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
@@ -4993,9 +5367,8 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
struct of_phandle_args clkspec;
- struct property *prop;
const char *clk_name;
- const __be32 *vp;
+ bool found = false;
u32 pv;
int rc;
int count;
@@ -5012,16 +5385,19 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
/* if there is an indices property, use it to transfer the index
* specified into an array offset for the clock-output-names property.
*/
- of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
+ of_property_for_each_u32(clkspec.np, "clock-indices", pv) {
if (index == pv) {
index = count;
+ found = true;
break;
}
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (prop && !vp)
+ if (of_property_present(clkspec.np, "clock-indices") && !found) {
+ of_node_put(clkspec.np);
return NULL;
+ }
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
@@ -5133,14 +5509,12 @@ static int parent_ready(struct device_node *np)
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
- struct property *prop;
- const __be32 *cur;
uint32_t idx;
if (!np || !flags)
return -EINVAL;
- of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
+ of_property_for_each_u32(np, "clock-critical", idx)
if (index == idx)
*flags |= CLK_IS_CRITICAL;