author     Linus Torvalds <torvalds@linux-foundation.org>  2021-02-20 21:42:18 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-02-20 21:42:18 -0800
commit     02f9fc286e039d0bef7284fb1200ee755b525bde
tree       2ce396501ee2590ca7e37c3ea6f058d164b41474  /drivers/opp/core.c
parent     5d99aa093b566d234b51b7822c67059e2bd3ed8d
parent     a9a939cb34dfffb9f43b988a681d2566ca157b74
Merge tag 'pm-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "These add a new power capping facility allowing aggregate power
  constraints to be applied to sets of devices in a distributed manner,
  add a new CPU ID to the RAPL power capping driver and improve it, drop
  a cpufreq driver belonging to a platform that is not supported any
  more, drop two redundant cpufreq driver flags, update cpufreq drivers
  (intel_pstate, brcmstb-avs, qcom-hw), update the operating performance
  points (OPP) framework (code cleanups, new helpers, devfreq-related
  modifications), clean up devfreq, extend the PM clock layer, update
  the cpupower utility and make assorted janitorial changes.

  Specifics:

   - Add new power capping facility called DTPM (Dynamic Thermal Power
     Management), based on the existing power capping framework, to
     allow aggregate power constraints to be applied to sets of devices
     in a distributed manner, along with a CPU backend driver based on
     the Energy Model (Daniel Lezcano, Dan Carpenter, Colin Ian King).

   - Add AlderLake Mobile support to the Intel RAPL power capping driver
     and make it use the topology interface when laying out the system
     topology (Zhang Rui, Yunfeng Ye).

   - Drop the cpufreq tango driver belonging to a platform that is not
     supported any more (Arnd Bergmann).

   - Drop the redundant CPUFREQ_STICKY and CPUFREQ_PM_NO_WARN cpufreq
     driver flags (Viresh Kumar).

   - Update cpufreq drivers:
       * Fix max CPU frequency discovery in the intel_pstate driver and
         make janitorial changes in it (Chen Yu, Rafael Wysocki, Nigel
         Christian).
       * Fix resource leaks in the brcmstb-avs-cpufreq driver (Christophe
         JAILLET).
       * Make the tegra20 driver use the resource-managed API (Dmitry
         Osipenko).
       * Enable boost support in the qcom-hw driver (Shawn Guo).

   - Update the operating performance points (OPP) framework:
       * Clean up the OPP core (Dmitry Osipenko, Viresh Kumar).
       * Extend the OPP API by adding new helpers to it (Dmitry Osipenko,
         Viresh Kumar).
       * Allow required OPPs to be used for devfreq devices and update
         the devfreq governor code accordingly (Saravana Kannan).
       * Prepare the framework for introducing the new dev_pm_opp_set_opp()
         helper (Viresh Kumar); a usage sketch follows the shortlog below.
       * Drop dev_pm_opp_set_bw() and update related drivers (Viresh
         Kumar).
       * Allow lazy linking of required-OPPs (Viresh Kumar).

   - Simplify and clean up devfreq somewhat (Lukasz Luba, Yang Li,
     Pierre Kuo).

   - Update the generic power domains (genpd) framework:
       * Use device's next wakeup to determine domain idle state (Lina
         Iyer).
       * Improve initialization and debug (Dmitry Osipenko).
       * Simplify computations (Abaci Team).

   - Make janitorial changes in the core code handling system sleep and
     PM-runtime (Bhaskar Chowdhury, Bjorn Helgaas, Rikard Falkeborn,
     Zqiang).

   - Update the MAINTAINERS entry for the exynos cpuidle driver and drop
     DEBUG definition from intel_idle (Krzysztof Kozlowski, Tom Rix).

   - Extend the PM clock layer to cover clocks that must sleep (Nicolas
     Pitre).

   - Update the cpupower utility:
       * Update cpupower command, add support for AMD family 0x19 and
         clean up the code to remove many of the family checks to make
         future family updates easier (Nathan Fontenot, Robert Richter).
       * Add Makefile dependencies for install targets to allow building
         cpupower in parallel rather than serially (Ivan Babrou).
   - Make janitorial changes in power management Kconfig (Lukasz Luba)"

* tag 'pm-5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (89 commits)
  MAINTAINERS: cpuidle: exynos: include header in file pattern
  powercap: intel_rapl: Use topology interface in rapl_init_domains()
  powercap: intel_rapl: Use topology interface in rapl_add_package()
  PM: sleep: Constify static struct attribute_group
  PM: Kconfig: remove unneeded "default n" options
  PM: EM: update Kconfig description and drop "default n" option
  cpufreq: Remove unused flag CPUFREQ_PM_NO_WARN
  cpufreq: Remove CPUFREQ_STICKY flag
  PM / devfreq: Add required OPPs support to passive governor
  PM / devfreq: Cache OPP table reference in devfreq
  OPP: Add function to look up required OPP's for a given OPP
  PM / devfreq: rk3399_dmc: Remove unneeded semicolon
  opp: Replace ENOTSUPP with EOPNOTSUPP
  opp: Fix "foo * bar" should be "foo *bar"
  opp: Don't ignore clk_get() errors other than -ENOENT
  opp: Update bandwidth requirements based on scaling up/down
  opp: Allow lazy-linking of required-opps
  opp: Remove dev_pm_opp_set_bw()
  devfreq: tegra30: Migrate to dev_pm_opp_set_opp()
  drm: msm: Migrate to dev_pm_opp_set_opp()
  ...
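Several commits in the shortlog above migrate drivers from dev_pm_opp_set_bw() to the new dev_pm_opp_set_opp() helper. The sketch below shows what such a caller might look like; it is hypothetical consumer code, not taken from this merge, and the foo_* names are placeholders:

    #include <linux/err.h>
    #include <linux/pm_opp.h>

    static int foo_devfreq_target(struct device *dev, unsigned long *freq)
    {
            struct dev_pm_opp *opp;
            int ret;

            /* Pick the lowest available OPP at or above the requested frequency */
            opp = dev_pm_opp_find_freq_ceil(dev, freq);
            if (IS_ERR(opp))
                    return PTR_ERR(opp);

            /*
             * One call now covers the clock, regulators, required OPPs and
             * interconnect bandwidth for the chosen OPP, which is what let
             * the separate dev_pm_opp_set_bw() helper be dropped.
             */
            ret = dev_pm_opp_set_opp(dev, opp);

            dev_pm_opp_put(opp);
            return ret;
    }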
Diffstat (limited to 'drivers/opp/core.c')
-rw-r--r--  drivers/opp/core.c  798
1 file changed, 576 insertions(+), 222 deletions(-)
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 8c905aabacc0..c3f3d9249cc5 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -27,6 +27,10 @@
* various states of availability.
*/
LIST_HEAD(opp_tables);
+
+/* OPP tables with uninitialized required OPPs */
+LIST_HEAD(lazy_opp_tables);
+
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
@@ -146,6 +150,32 @@ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
/**
+ * dev_pm_opp_get_required_pstate() - Gets the required performance state
+ * corresponding to an available opp
+ * @opp: opp for which the required performance state has to be returned
+ * @index: index of the required opp
+ *
+ * Return: performance state read from device tree corresponding to the
+ * required opp, else return 0.
+ */
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available ||
+ index >= opp->opp_table->required_opp_count) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp->opp_table))
+ return 0;
+
+ return opp->required_opps[index]->pstate;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
+
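/*
 * Usage sketch (hypothetical consumer code, not part of this patch): reading
 * the genpd performance state tied to an OPP through the new helper; the
 * index 0 and the foo_* name are assumptions.
 */
#include <linux/pm_opp.h>

static unsigned int foo_required_pstate(struct dev_pm_opp *opp)
{
        /* Performance state of the first required OPP; 0 is returned on error */
        return dev_pm_opp_get_required_pstate(opp, 0);
}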
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
@@ -449,6 +479,55 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
+/**
+ * dev_pm_opp_find_level_ceil() - search for a rounded-up level
+ * @dev: device for which we do this operation
+ * @level: level to search for
+ *
+ * Return: Searches for rounded up match in the opp table and returns pointer
+ * to the matching opp if found, else returns ERR_PTR in case of error and
+ * should be handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->level >= *level) {
+ opp = temp_opp;
+ *level = opp->level;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
+
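/*
 * Usage sketch (hypothetical consumer code, not part of this patch) for the
 * new dev_pm_opp_find_level_ceil() helper; foo_set_level and the way the
 * level is applied are assumptions.
 */
#include <linux/err.h>
#include <linux/pm_opp.h>

static int foo_set_level(struct device *dev, unsigned int level)
{
        struct dev_pm_opp *opp;

        /* Rounds "level" up to the nearest available OPP and takes a reference */
        opp = dev_pm_opp_find_level_ceil(dev, &level);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        /* ... program the device using "level" / the returned OPP ... */

        dev_pm_opp_put(opp);
        return 0;
}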
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
@@ -655,6 +734,10 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
{
int ret;
+ /* We may reach here for devices which don't change frequency */
+ if (IS_ERR(clk))
+ return 0;
+
ret = clk_set_rate(clk, freq);
if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
@@ -666,12 +749,12 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
static int _generic_set_opp_regulator(struct opp_table *opp_table,
struct device *dev,
- unsigned long old_freq,
+ struct dev_pm_opp *opp,
unsigned long freq,
- struct dev_pm_opp_supply *old_supply,
- struct dev_pm_opp_supply *new_supply)
+ int scaling_down)
{
struct regulator *reg = opp_table->regulators[0];
+ struct dev_pm_opp *old_opp = opp_table->current_opp;
int ret;
/* This function only supports single regulator per device */
@@ -681,8 +764,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
}
/* Scaling up? Scale voltage before frequency */
- if (freq >= old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
+ if (!scaling_down) {
+ ret = _set_opp_voltage(dev, reg, opp->supplies);
if (ret)
goto restore_voltage;
}
@@ -693,8 +776,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
goto restore_voltage;
/* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
+ if (scaling_down) {
+ ret = _set_opp_voltage(dev, reg, opp->supplies);
if (ret)
goto restore_freq;
}
@@ -712,19 +795,18 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
return 0;
restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
+ if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
+ __func__, old_opp->rate);
restore_voltage:
/* This shouldn't harm even if the voltages weren't updated earlier */
- if (old_supply)
- _set_opp_voltage(dev, reg, old_supply);
+ _set_opp_voltage(dev, reg, old_opp->supplies);
return ret;
}
static int _set_opp_bw(const struct opp_table *opp_table,
- struct dev_pm_opp *opp, struct device *dev, bool remove)
+ struct dev_pm_opp *opp, struct device *dev)
{
u32 avg, peak;
int i, ret;
@@ -733,7 +815,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
for (i = 0; i < opp_table->path_count; i++) {
- if (remove) {
+ if (!opp) {
avg = 0;
peak = 0;
} else {
@@ -743,7 +825,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
ret = icc_set_bw(opp_table->paths[i], avg, peak);
if (ret) {
dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
- remove ? "remove" : "set", i, ret);
+ opp ? "set" : "remove", i, ret);
return ret;
}
}
@@ -752,29 +834,31 @@ static int _set_opp_bw(const struct opp_table *opp_table,
}
static int _set_opp_custom(const struct opp_table *opp_table,
- struct device *dev, unsigned long old_freq,
- unsigned long freq,
- struct dev_pm_opp_supply *old_supply,
- struct dev_pm_opp_supply *new_supply)
+ struct device *dev, struct dev_pm_opp *opp,
+ unsigned long freq)
{
- struct dev_pm_set_opp_data *data;
+ struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
+ struct dev_pm_opp *old_opp = opp_table->current_opp;
int size;
- data = opp_table->set_opp_data;
+ /*
+ * We support this only if dev_pm_opp_set_regulators() was called
+ * earlier.
+ */
+ if (opp_table->sod_supplies) {
+ size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
+ memcpy(data->old_opp.supplies, old_opp->supplies, size);
+ memcpy(data->new_opp.supplies, opp->supplies, size);
+ data->regulator_count = opp_table->regulator_count;
+ } else {
+ data->regulator_count = 0;
+ }
+
data->regulators = opp_table->regulators;
- data->regulator_count = opp_table->regulator_count;
data->clk = opp_table->clk;
data->dev = dev;
-
- data->old_opp.rate = old_freq;
- size = sizeof(*old_supply) * opp_table->regulator_count;
- if (!old_supply)
- memset(data->old_opp.supplies, 0, size);
- else
- memcpy(data->old_opp.supplies, old_supply, size);
-
+ data->old_opp.rate = old_opp->rate;
data->new_opp.rate = freq;
- memcpy(data->new_opp.supplies, new_supply, size);
return opp_table->set_opp(data);
}
@@ -809,6 +893,10 @@ static int _set_required_opps(struct device *dev,
if (!required_opp_tables)
return 0;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return -EBUSY;
+
/* Single genpd case */
if (!genpd_virt_devs)
return _set_required_opp(dev, dev, opp, 0);
@@ -841,38 +929,32 @@ static int _set_required_opps(struct device *dev,
return ret;
}
-/**
- * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp
- * @dev: device for which we do this operation
- * @opp: opp based on which the bandwidth levels are to be configured
- *
- * This configures the bandwidth to the levels specified by the OPP. However
- * if the OPP specified is NULL the bandwidth levels are cleared out.
- *
- * Return: 0 on success or a negative error value.
- */
-int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
+static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
- struct opp_table *opp_table;
- int ret;
+ struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
+ unsigned long freq;
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp table doesn't exist\n", __func__);
- return PTR_ERR(opp_table);
+ if (!IS_ERR(opp_table->clk)) {
+ freq = clk_get_rate(opp_table->clk);
+ opp = _find_freq_ceil(opp_table, &freq);
}
- if (opp)
- ret = _set_opp_bw(opp_table, opp, dev, false);
- else
- ret = _set_opp_bw(opp_table, NULL, dev, true);
+ /*
+ * Unable to find the current OPP ? Pick the first from the list since
+ * it is in ascending order, otherwise rest of the code will need to
+ * make special checks to validate current_opp.
+ */
+ if (IS_ERR(opp)) {
+ mutex_lock(&opp_table->lock);
+ opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+ }
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ opp_table->current_opp = opp;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw);
-static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
+static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
int ret;
@@ -887,7 +969,7 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
if (!_get_opp_count(opp_table))
return 0;
- ret = _set_opp_bw(opp_table, NULL, dev, true);
+ ret = _set_opp_bw(opp_table, NULL, dev);
if (ret)
return ret;
@@ -900,6 +982,89 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
return ret;
}
+static int _set_opp(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, unsigned long freq)
+{
+ struct dev_pm_opp *old_opp;
+ int scaling_down, ret;
+
+ if (unlikely(!opp))
+ return _disable_opp_table(dev, opp_table);
+
+ /* Find the currently set OPP if we don't know already */
+ if (unlikely(!opp_table->current_opp))
+ _find_current_opp(dev, opp_table);
+
+ old_opp = opp_table->current_opp;
+
+ /* Return early if nothing to do */
+ if (opp_table->enabled && old_opp == opp) {
+ dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
+ __func__, old_opp->rate, freq, old_opp->level, opp->level,
+ old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
+ opp->bandwidth ? opp->bandwidth[0].peak : 0);
+
+ scaling_down = _opp_compare_key(old_opp, opp);
+ if (scaling_down == -1)
+ scaling_down = 0;
+
+ /* Scaling up? Configure required OPPs before frequency */
+ if (!scaling_down) {
+ ret = _set_required_opps(dev, opp_table, opp, true);
+ if (ret) {
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
+ return ret;
+ }
+
+ ret = _set_opp_bw(opp_table, opp, dev);
+ if (ret) {
+ dev_err(dev, "Failed to set bw: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (opp_table->set_opp) {
+ ret = _set_opp_custom(opp_table, dev, opp, freq);
+ } else if (opp_table->regulators) {
+ ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
+ scaling_down);
+ } else {
+ /* Only frequency scaling */
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ }
+
+ if (ret)
+ return ret;
+
+ /* Scaling down? Configure required OPPs after frequency */
+ if (scaling_down) {
+ ret = _set_opp_bw(opp_table, opp, dev);
+ if (ret) {
+ dev_err(dev, "Failed to set bw: %d\n", ret);
+ return ret;
+ }
+
+ ret = _set_required_opps(dev, opp_table, opp, false);
+ if (ret) {
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
+ return ret;
+ }
+ }
+
+ opp_table->enabled = true;
+ dev_pm_opp_put(old_opp);
+
+ /* Make sure current_opp doesn't get freed */
+ dev_pm_opp_get(opp);
+ opp_table->current_opp = opp;
+
+ return ret;
+}
+
/**
* dev_pm_opp_set_rate() - Configure new OPP based on frequency
* @dev: device for which we do this operation
@@ -914,118 +1079,85 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
- unsigned long freq, old_freq, temp_freq;
- struct dev_pm_opp *old_opp, *opp;
- struct clk *clk;
+ unsigned long freq = 0, temp_freq;
+ struct dev_pm_opp *opp = NULL;
int ret;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
return PTR_ERR(opp_table);
}
- if (unlikely(!target_freq)) {
- ret = _opp_set_rate_zero(dev, opp_table);
- goto put_opp_table;
- }
-
- clk = opp_table->clk;
- if (IS_ERR(clk)) {
- dev_err(dev, "%s: No clock available for the device\n",
- __func__);
- ret = PTR_ERR(clk);
- goto put_opp_table;
- }
-
- freq = clk_round_rate(clk, target_freq);
- if ((long)freq <= 0)
- freq = target_freq;
-
- old_freq = clk_get_rate(clk);
-
- /* Return early if nothing to do */
- if (opp_table->enabled && old_freq == freq) {
- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
- __func__, freq);
- ret = 0;
- goto put_opp_table;
- }
-
- /*
- * For IO devices which require an OPP on some platforms/SoCs
- * while just needing to scale the clock on some others
- * we look for empty OPP tables with just a clock handle and
- * scale only the clk. This makes dev_pm_opp_set_rate()
- * equivalent to a clk_set_rate()
- */
- if (!_get_opp_count(opp_table)) {
- ret = _generic_set_opp_clk_only(dev, clk, freq);
- goto put_opp_table;
- }
+ if (target_freq) {
+ /*
+ * For IO devices which require an OPP on some platforms/SoCs
+ * while just needing to scale the clock on some others
+ * we look for empty OPP tables with just a clock handle and
+ * scale only the clk. This makes dev_pm_opp_set_rate()
+ * equivalent to a clk_set_rate()
+ */
+ if (!_get_opp_count(opp_table)) {
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
+ goto put_opp_table;
+ }
- temp_freq = old_freq;
- old_opp = _find_freq_ceil(opp_table, &temp_freq);
- if (IS_ERR(old_opp)) {
- dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
- __func__, old_freq, PTR_ERR(old_opp));
- }
+ freq = clk_round_rate(opp_table->clk, target_freq);
+ if ((long)freq <= 0)
+ freq = target_freq;
- temp_freq = freq;
- opp = _find_freq_ceil(opp_table, &temp_freq);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
- __func__, freq, ret);
- goto put_old_opp;
+ /*
+ * The clock driver may support finer resolution of the
+ * frequencies than the OPP table, don't update the frequency we
+ * pass to clk_set_rate() here.
+ */
+ temp_freq = freq;
+ opp = _find_freq_ceil(opp_table, &temp_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+ __func__, freq, ret);
+ goto put_opp_table;
+ }
}
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
- old_freq, freq);
+ ret = _set_opp(dev, opp_table, opp, freq);
- /* Scaling up? Configure required OPPs before frequency */
- if (freq >= old_freq) {
- ret = _set_required_opps(dev, opp_table, opp, true);
- if (ret)
- goto put_opp;
- }
-
- if (opp_table->set_opp) {
- ret = _set_opp_custom(opp_table, dev, old_freq, freq,
- IS_ERR(old_opp) ? NULL : old_opp->supplies,
- opp->supplies);
- } else if (opp_table->regulators) {
- ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
- IS_ERR(old_opp) ? NULL : old_opp->supplies,
- opp->supplies);
- } else {
- /* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, clk, freq);
- }
+ if (target_freq)
+ dev_pm_opp_put(opp);
+put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
- /* Scaling down? Configure required OPPs after frequency */
- if (!ret && freq < old_freq) {
- ret = _set_required_opps(dev, opp_table, opp, false);
- if (ret)
- dev_err(dev, "Failed to set required opps: %d\n", ret);
- }
+/**
+ * dev_pm_opp_set_opp() - Configure device for OPP
+ * @dev: device for which we do this operation
+ * @opp: OPP to set to
+ *
+ * This configures the device based on the properties of the OPP passed to this
+ * routine.
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
+{
+ struct opp_table *opp_table;
+ int ret;
- if (!ret) {
- ret = _set_opp_bw(opp_table, opp, dev, false);
- if (!ret)
- opp_table->enabled = true;
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ return PTR_ERR(opp_table);
}
-put_opp:
- dev_pm_opp_put(opp);
-put_old_opp:
- if (!IS_ERR(old_opp))
- dev_pm_opp_put(old_opp);
-put_opp_table:
+ ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
dev_pm_opp_put_opp_table(opp_table);
+
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
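/*
 * Usage sketch (hypothetical, not part of this patch): per _set_opp() above,
 * a NULL OPP tears the configuration down (bandwidth, regulators, required
 * OPPs), mirroring dev_pm_opp_set_rate(dev, 0); foo_runtime_suspend is an
 * assumed call site.
 */
#include <linux/pm_opp.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* Drop the device's OPP votes while it is idle */
        return dev_pm_opp_set_opp(dev, NULL);
}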
/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
@@ -1075,6 +1207,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
mutex_init(&opp_table->lock);
mutex_init(&opp_table->genpd_virt_dev_lock);
INIT_LIST_HEAD(&opp_table->dev_list);
+ INIT_LIST_HEAD(&opp_table->lazy);
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
@@ -1087,21 +1220,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
_of_init_opp_table(opp_table, dev, index);
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, NULL);
- if (IS_ERR(opp_table->clk)) {
- ret = PTR_ERR(opp_table->clk);
- if (ret == -EPROBE_DEFER)
- goto remove_opp_dev;
-
- dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
- }
-
/* Find interconnect path(s) for the device */
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
if (ret) {
if (ret == -EPROBE_DEFER)
- goto put_clk;
+ goto remove_opp_dev;
dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
__func__, ret);
@@ -1113,9 +1236,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
-put_clk:
- if (!IS_ERR(opp_table->clk))
- clk_put(opp_table->clk);
remove_opp_dev:
_remove_opp_dev(opp_dev, opp_table);
err:
@@ -1128,6 +1248,37 @@ void _get_opp_table_kref(struct opp_table *opp_table)
kref_get(&opp_table->kref);
}
+static struct opp_table *_update_opp_table_clk(struct device *dev,
+ struct opp_table *opp_table,
+ bool getclk)
+{
+ int ret;
+
+ /*
+ * Return early if we don't need to get clk or we have already tried it
+ * earlier.
+ */
+ if (!getclk || IS_ERR(opp_table) || opp_table->clk)
+ return opp_table;
+
+ /* Find clk for the device */
+ opp_table->clk = clk_get(dev, NULL);
+
+ ret = PTR_ERR_OR_ZERO(opp_table->clk);
+ if (!ret)
+ return opp_table;
+
+ if (ret == -ENOENT) {
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
+ return opp_table;
+ }
+
+ dev_pm_opp_put_opp_table(opp_table);
+ dev_err_probe(dev, ret, "Couldn't find clock\n");
+
+ return ERR_PTR(ret);
+}
+
/*
* We need to make sure that the OPP table for a device doesn't get added twice,
* if this routine gets called in parallel with the same device pointer.
@@ -1143,7 +1294,8 @@ void _get_opp_table_kref(struct opp_table *opp_table)
* uses the opp_tables_busy flag to indicate if another creator is in the middle
* of adding an OPP table and others should wait for it to finish.
*/
-struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
+ bool getclk)
{
struct opp_table *opp_table;
@@ -1190,12 +1342,12 @@ again:
unlock:
mutex_unlock(&opp_table_lock);
- return opp_table;
+ return _update_opp_table_clk(dev, opp_table, getclk);
}
-struct opp_table *_add_opp_table(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
- return _add_opp_table_indexed(dev, 0);
+ return _add_opp_table_indexed(dev, 0, getclk);
}
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
@@ -1214,6 +1366,9 @@ static void _opp_table_kref_release(struct kref *kref)
list_del(&opp_table->node);
mutex_unlock(&opp_table_lock);
+ if (opp_table->current_opp)
+ dev_pm_opp_put(opp_table->current_opp);
+
_of_clear_opp_table(opp_table);
/* Release clk */
@@ -1508,6 +1663,21 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
return 0;
}
+void _required_opps_available(struct dev_pm_opp *opp, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (opp->required_opps[i]->available)
+ continue;
+
+ opp->available = false;
+ pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
+ __func__, opp->required_opps[i]->np, opp->rate);
+ return;
+ }
+}
+
/*
* Returns:
* 0: On success. And appropriate error message for duplicate OPPs.
@@ -1527,12 +1697,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
mutex_lock(&opp_table->lock);
head = &opp_table->opp_list;
- if (likely(!rate_not_available)) {
- ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
- if (ret) {
- mutex_unlock(&opp_table->lock);
- return ret;
- }
+ ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
+ if (ret) {
+ mutex_unlock(&opp_table->lock);
+ return ret;
}
list_add(&new_opp->node, head);
@@ -1549,6 +1717,12 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
__func__, new_opp->rate);
}
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return 0;
+
+ _required_opps_available(new_opp, opp_table->required_opp_count);
+
return 0;
}
@@ -1631,7 +1805,7 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
{
struct opp_table *opp_table;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1693,7 +1867,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
struct opp_table *opp_table;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1737,38 +1911,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
-static int _allocate_set_opp_data(struct opp_table *opp_table)
-{
- struct dev_pm_set_opp_data *data;
- int len, count = opp_table->regulator_count;
-
- if (WARN_ON(!opp_table->regulators))
- return -EINVAL;
-
- /* space for set_opp_data */
- len = sizeof(*data);
-
- /* space for old_opp.supplies and new_opp.supplies */
- len += 2 * sizeof(struct dev_pm_opp_supply) * count;
-
- data = kzalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->old_opp.supplies = (void *)(data + 1);
- data->new_opp.supplies = data->old_opp.supplies + count;
-
- opp_table->set_opp_data = data;
-
- return 0;
-}
-
-static void _free_set_opp_data(struct opp_table *opp_table)
-{
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
-}
-
/**
* dev_pm_opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
@@ -1785,11 +1927,12 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
const char * const names[],
unsigned int count)
{
+ struct dev_pm_opp_supply *supplies;
struct opp_table *opp_table;
struct regulator *reg;
int ret, i;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1826,10 +1969,19 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
opp_table->regulator_count = count;
- /* Allocate block only once to pass to set_opp() routines */
- ret = _allocate_set_opp_data(opp_table);
- if (ret)
+ supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
+ if (!supplies) {
+ ret = -ENOMEM;
goto free_regulators;
+ }
+
+ mutex_lock(&opp_table->lock);
+ opp_table->sod_supplies = supplies;
+ if (opp_table->set_opp_data) {
+ opp_table->set_opp_data->old_opp.supplies = supplies;
+ opp_table->set_opp_data->new_opp.supplies = supplies + count;
+ }
+ mutex_unlock(&opp_table->lock);
return opp_table;
@@ -1872,7 +2024,15 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- _free_set_opp_data(opp_table);
+ mutex_lock(&opp_table->lock);
+ if (opp_table->set_opp_data) {
+ opp_table->set_opp_data->old_opp.supplies = NULL;
+ opp_table->set_opp_data->new_opp.supplies = NULL;
+ }
+
+ kfree(opp_table->sod_supplies);
+ opp_table->sod_supplies = NULL;
+ mutex_unlock(&opp_table->lock);
kfree(opp_table->regulators);
opp_table->regulators = NULL;
@@ -1900,7 +2060,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
struct opp_table *opp_table;
int ret;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1910,9 +2070,11 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
goto err;
}
- /* Already have default clk set, free it */
- if (!IS_ERR(opp_table->clk))
- clk_put(opp_table->clk);
+ /* clk shouldn't be initialized at this point */
+ if (WARN_ON(opp_table->clk)) {
+ ret = -EBUSY;
+ goto err;
+ }
/* Find clk for the device */
opp_table->clk = clk_get(dev, name);
@@ -1966,12 +2128,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
int (*set_opp)(struct dev_pm_set_opp_data *data))
{
+ struct dev_pm_set_opp_data *data;
struct opp_table *opp_table;
if (!set_opp)
return ERR_PTR(-EINVAL);
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1982,8 +2145,23 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
}
/* Another CPU that shares the OPP table has set the helper ? */
- if (!opp_table->set_opp)
- opp_table->set_opp = set_opp;
+ if (opp_table->set_opp)
+ return opp_table;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&opp_table->lock);
+ opp_table->set_opp_data = data;
+ if (opp_table->sod_supplies) {
+ data->old_opp.supplies = opp_table->sod_supplies;
+ data->new_opp.supplies = opp_table->sod_supplies +
+ opp_table->regulator_count;
+ }
+ mutex_unlock(&opp_table->lock);
+
+ opp_table->set_opp = set_opp;
return opp_table;
}
@@ -2005,10 +2183,50 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
WARN_ON(!list_empty(&opp_table->opp_list));
opp_table->set_opp = NULL;
+
+ mutex_lock(&opp_table->lock);
+ kfree(opp_table->set_opp_data);
+ opp_table->set_opp_data = NULL;
+ mutex_unlock(&opp_table->lock);
+
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
+static void devm_pm_opp_unregister_set_opp_helper(void *data)
+{
+ dev_pm_opp_unregister_set_opp_helper(data);
+}
+
+/**
+ * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * @dev: Device for which the helper is getting registered.
+ * @set_opp: Custom set OPP helper.
+ *
+ * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
+ *
+ * Return: pointer to 'struct opp_table' on success and an error pointer otherwise.
+ */
+struct opp_table *
+devm_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ struct opp_table *opp_table;
+ int err;
+
+ opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
+ if (IS_ERR(opp_table))
+ return opp_table;
+
+ err = devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
+ opp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ return opp_table;
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
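/*
 * Usage sketch (hypothetical probe code, not part of this patch): with the
 * devm_ variant the helper is unregistered automatically on driver detach;
 * the foo_* names are assumptions.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

static int foo_set_opp(struct dev_pm_set_opp_data *data)
{
        /* Platform-specific clk/regulator sequencing would go here */
        return 0;
}

static int foo_probe(struct platform_device *pdev)
{
        struct opp_table *opp_table;

        opp_table = devm_pm_opp_register_set_opp_helper(&pdev->dev, foo_set_opp);
        if (IS_ERR(opp_table))
                return PTR_ERR(opp_table);

        /* No explicit dev_pm_opp_unregister_set_opp_helper() call is needed */
        return 0;
}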
+
static void _opp_detach_genpd(struct opp_table *opp_table)
{
int index;
@@ -2058,7 +2276,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
int index = 0, ret = -EINVAL;
const char **name = names;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -2144,6 +2362,97 @@ void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
+static void devm_pm_opp_detach_genpd(void *data)
+{
+ dev_pm_opp_detach_genpd(data);
+}
+
+/**
+ * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
+ * device pointer
+ * @dev: Consumer device for which the genpd is getting attached.
+ * @names: Null terminated array of pointers containing names of genpd to attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
+ *
+ * This is a resource-managed version of dev_pm_opp_attach_genpd().
+ *
+ * Return: pointer to 'struct opp_table' on success and an error pointer otherwise.
+ */
+struct opp_table *
+devm_pm_opp_attach_genpd(struct device *dev, const char **names,
+ struct device ***virt_devs)
+{
+ struct opp_table *opp_table;
+ int err;
+
+ opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
+ if (IS_ERR(opp_table))
+ return opp_table;
+
+ err = devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
+ opp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ return opp_table;
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
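/*
 * Usage sketch (hypothetical probe code, not part of this patch): attaching
 * two power domains with the devm_ variant; the domain names "mx"/"cx" and
 * foo_probe are assumptions.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

static int foo_probe(struct platform_device *pdev)
{
        const char *names[] = { "mx", "cx", NULL };
        struct device **virt_devs;
        struct opp_table *opp_table;

        opp_table = devm_pm_opp_attach_genpd(&pdev->dev, names, &virt_devs);
        if (IS_ERR(opp_table))
                return PTR_ERR(opp_table);

        /* virt_devs[0]/[1] represent the "mx"/"cx" domains; detach is automatic */
        return 0;
}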
+
+/**
+ * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
+ * @src_table: OPP table which has @dst_table as one of its required OPP table.
+ * @dst_table: Required OPP table of the @src_table.
+ * @src_opp: OPP from the @src_table.
+ *
+ * This function returns the OPP (present in @dst_table) pointed out by the
+ * "required-opps" property of the @src_opp (present in @src_table).
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ *
+ * Return: pointer to 'struct dev_pm_opp' on success and an error pointer otherwise.
+ */
+struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+ struct opp_table *dst_table,
+ struct dev_pm_opp *src_opp)
+{
+ struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
+ int i;
+
+ if (!src_table || !dst_table || !src_opp ||
+ !src_table->required_opp_tables)
+ return ERR_PTR(-EINVAL);
+
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(src_table))
+ return ERR_PTR(-EBUSY);
+
+ for (i = 0; i < src_table->required_opp_count; i++) {
+ if (src_table->required_opp_tables[i] == dst_table) {
+ mutex_lock(&src_table->lock);
+
+ list_for_each_entry(opp, &src_table->opp_list, node) {
+ if (opp == src_opp) {
+ dest_opp = opp->required_opps[i];
+ dev_pm_opp_get(dest_opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&src_table->lock);
+ break;
+ }
+ }
+
+ if (IS_ERR(dest_opp)) {
+ pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
+ src_table, dst_table);
+ }
+
+ return dest_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
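/*
 * Usage sketch (hypothetical, not part of this patch): translating one
 * device's OPP into the corresponding required OPP of another table, e.g.
 * for a devfreq parent/child relationship; foo_xlate is an assumption.
 */
#include <linux/err.h>
#include <linux/pm_opp.h>

static int foo_xlate(struct opp_table *src_table, struct opp_table *dst_table,
                     struct dev_pm_opp *src_opp)
{
        struct dev_pm_opp *dst_opp;

        dst_opp = dev_pm_opp_xlate_required_opp(src_table, dst_table, src_opp);
        if (IS_ERR(dst_opp))
                return PTR_ERR(dst_opp);

        /* ... use dst_opp, e.g. pass it to dev_pm_opp_set_opp() ... */

        dev_pm_opp_put(dst_opp);
        return 0;
}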
+
/**
* dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
* @src_table: OPP table which has dst_table as one of its required OPP table.
@@ -2172,9 +2481,13 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
* and so none of them have the "required-opps" property set. Return the
* pstate of the src_table as it is in such cases.
*/
- if (!src_table->required_opp_count)
+ if (!src_table || !src_table->required_opp_count)
return pstate;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(src_table))
+ return -EBUSY;
+
for (i = 0; i < src_table->required_opp_count; i++) {
if (src_table->required_opp_tables[i]->np == dst_table->np)
break;
@@ -2226,7 +2539,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
struct opp_table *opp_table;
int ret;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, true);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -2504,3 +2817,44 @@ void dev_pm_opp_remove_table(struct device *dev)
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
+
+/**
+ * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
+ * @dev: device for which we do this operation
+ *
+ * Sync voltage state of the OPP table regulators.
+ *
+ * Return: 0 on success or a negative error value.
+ */
+int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int i, ret = 0;
+
+ /* Device may not have OPP table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return 0;
+
+ /* Regulator may not be required for the device */
+ if (unlikely(!opp_table->regulators))
+ goto put_table;
+
+ /* Nothing to sync if voltage wasn't changed */
+ if (!opp_table->enabled)
+ goto put_table;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+ ret = regulator_sync_voltage(reg);
+ if (ret)
+ break;
+ }
+put_table:
+ /* Drop reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
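/*
 * Usage sketch (hypothetical, not part of this patch): one plausible call
 * site is a resume path where regulator state may have been lost; that
 * context and foo_runtime_resume are assumptions.
 */
#include <linux/pm_opp.h>

static int foo_runtime_resume(struct device *dev)
{
        /* Re-apply the voltage of the currently enabled OPP, if any */
        return dev_pm_opp_sync_regulators(dev);
}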