Diffstat (limited to 'drivers/opp')
 drivers/opp/core.c          | 741
 drivers/opp/cpu.c           |  30
 drivers/opp/debugfs.c       |  24
 drivers/opp/of.c            | 291
 drivers/opp/opp.h           |   9
 drivers/opp/ti-opp-supply.c |   8
6 files changed, 445 insertions, 658 deletions
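
Most of the churn in core.c, cpu.c and of.c below comes from converting open-coded mutex_lock()/mutex_unlock() pairs and manual dev_pm_opp_put*()/of_node_put() calls to the scope-based cleanup helpers from <linux/cleanup.h> (guard(), scoped_guard() and __free()). The sketch below is only an illustration of that pattern, not code from this commit; every "my_table_*" name is a hypothetical stand-in for the real OPP helpers.

/*
 * Minimal sketch of the <linux/cleanup.h> pattern used throughout this
 * diff. All "my_table_*" names are hypothetical stand-ins.
 */
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mutex.h>

struct my_table {
	struct mutex lock;
	int count;
};

struct my_table *my_table_get(struct device *dev);	/* ERR_PTR() on failure */
void my_table_put(struct my_table *table);

/* Teach __free(my_table_put) to drop the reference when the variable leaves scope. */
DEFINE_FREE(my_table_put, struct my_table *,
	    if (!IS_ERR_OR_NULL(_T)) my_table_put(_T))

static int my_table_count(struct device *dev)
{
	/* Reference dropped automatically on every return path, no "goto put_table". */
	struct my_table *table __free(my_table_put) = my_table_get(dev);

	if (IS_ERR(table))
		return PTR_ERR(table);

	/* Mutex released automatically at the end of the enclosing scope. */
	guard(mutex)(&table->lock);
	return table->count;
}

The commit also makes the reference getters (dev_pm_opp_get(), dev_pm_opp_get_opp_table_ref()) return their argument, which is what allows one-liners such as "opp_table->current_opp = dev_pm_opp_get(opp);" and declarations like "struct opp_table *opp_table __free(put_opp_table);" in the hunks that follow.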
diff --git a/drivers/opp/core.c b/drivers/opp/core.c index c4e0432ae42a..edbd60501cf0 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -40,17 +40,14 @@ static DEFINE_XARRAY_ALLOC1(opp_configs); static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table) { struct opp_device *opp_dev; - bool found = false; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); + list_for_each_entry(opp_dev, &opp_table->dev_list, node) - if (opp_dev->dev == dev) { - found = true; - break; - } + if (opp_dev->dev == dev) + return true; - mutex_unlock(&opp_table->lock); - return found; + return false; } static struct opp_table *_find_opp_table_unlocked(struct device *dev) @@ -58,10 +55,8 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev) struct opp_table *opp_table; list_for_each_entry(opp_table, &opp_tables, node) { - if (_find_opp_dev(dev, opp_table)) { - _get_opp_table_kref(opp_table); - return opp_table; - } + if (_find_opp_dev(dev, opp_table)) + return dev_pm_opp_get_opp_table_ref(opp_table); } return ERR_PTR(-ENODEV); @@ -80,18 +75,13 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev) */ struct opp_table *_find_opp_table(struct device *dev) { - struct opp_table *opp_table; - if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } - mutex_lock(&opp_table_lock); - opp_table = _find_opp_table_unlocked(dev); - mutex_unlock(&opp_table_lock); - - return opp_table; + guard(mutex)(&opp_table_lock); + return _find_opp_table_unlocked(dev); } /* @@ -101,11 +91,55 @@ struct opp_table *_find_opp_table(struct device *dev) * representation in the OPP table and manage the clock configuration themselves * in an platform specific way. */ -static bool assert_single_clk(struct opp_table *opp_table) +static bool assert_single_clk(struct opp_table *opp_table, + unsigned int __always_unused index) { return !WARN_ON(opp_table->clk_count > 1); } +/* + * Returns true if clock table is large enough to contain the clock index. + */ +static bool assert_clk_index(struct opp_table *opp_table, + unsigned int index) +{ + return opp_table->clk_count > index; +} + +/* + * Returns true if bandwidth table is large enough to contain the bandwidth index. + */ +static bool assert_bandwidth_index(struct opp_table *opp_table, + unsigned int index) +{ + return opp_table->path_count > index; +} + +/** + * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp + * @opp: opp for which bandwidth has to be returned for + * @peak: select peak or average bandwidth + * @index: bandwidth index + * + * Return: bandwidth in kBps, else return 0 + */ +unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index) +{ + if (IS_ERR_OR_NULL(opp)) { + pr_err("%s: Invalid parameters\n", __func__); + return 0; + } + + if (index >= opp->opp_table->path_count) + return 0; + + if (!opp->bandwidth) + return 0; + + return peak ? 
opp->bandwidth[index].peak : opp->bandwidth[index].avg; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw); + /** * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp * @opp: opp for which voltage has to be returned for @@ -275,18 +309,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); */ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) { - struct opp_table *opp_table; - unsigned long clock_latency_ns; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return 0; - clock_latency_ns = opp_table->clock_latency_ns_max; - - dev_pm_opp_put_opp_table(opp_table); - - return clock_latency_ns; + return opp_table->clock_latency_ns_max; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); @@ -298,7 +327,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); */ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp; struct regulator *reg; unsigned long latency_ns = 0; @@ -314,33 +343,31 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) /* Regulator may not be required for the device */ if (!opp_table->regulators) - goto put_opp_table; + return 0; count = opp_table->regulator_count; uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) - goto put_opp_table; + return 0; - mutex_lock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) { + for (i = 0; i < count; i++) { + uV[i].min = ~0; + uV[i].max = 0; - for (i = 0; i < count; i++) { - uV[i].min = ~0; - uV[i].max = 0; + list_for_each_entry(opp, &opp_table->opp_list, node) { + if (!opp->available) + continue; - list_for_each_entry(opp, &opp_table->opp_list, node) { - if (!opp->available) - continue; - - if (opp->supplies[i].u_volt_min < uV[i].min) - uV[i].min = opp->supplies[i].u_volt_min; - if (opp->supplies[i].u_volt_max > uV[i].max) - uV[i].max = opp->supplies[i].u_volt_max; + if (opp->supplies[i].u_volt_min < uV[i].min) + uV[i].min = opp->supplies[i].u_volt_min; + if (opp->supplies[i].u_volt_max > uV[i].max) + uV[i].max = opp->supplies[i].u_volt_max; + } } } - mutex_unlock(&opp_table->lock); - /* * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine. 
@@ -353,8 +380,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) } kfree(uV); -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); return latency_ns; } @@ -384,7 +409,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); */ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); unsigned long freq = 0; opp_table = _find_opp_table(dev); @@ -394,8 +419,6 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) if (opp_table->suspend_opp && opp_table->suspend_opp->available) freq = dev_pm_opp_get_freq(opp_table->suspend_opp); - dev_pm_opp_put_opp_table(opp_table); - return freq; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); @@ -405,15 +428,13 @@ int _get_opp_count(struct opp_table *opp_table) struct dev_pm_opp *opp; int count = 0; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp, &opp_table->opp_list, node) { if (opp->available) count++; } - mutex_unlock(&opp_table->lock); - return count; } @@ -426,21 +447,16 @@ int _get_opp_count(struct opp_table *opp_table) */ int dev_pm_opp_get_opp_count(struct device *dev) { - struct opp_table *opp_table; - int count; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - count = PTR_ERR(opp_table); - dev_dbg(dev, "%s: OPP table not found (%d)\n", - __func__, count); - return count; + dev_dbg(dev, "%s: OPP table not found (%ld)\n", + __func__, PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - count = _get_opp_count(opp_table); - dev_pm_opp_put_opp_table(opp_table); - - return count; + return _get_opp_count(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); @@ -499,15 +515,15 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, unsigned long (*read)(struct dev_pm_opp *opp, int index), bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); /* Assert that the requirement is met */ - if (assert && !assert(opp_table)) + if (assert && !assert(opp_table, index)) return ERR_PTR(-EINVAL); - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(temp_opp, &opp_table->opp_list, node) { if (temp_opp->available == available) { @@ -522,8 +538,6 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, dev_pm_opp_get(opp); } - mutex_unlock(&opp_table->lock); - return opp; } @@ -532,10 +546,9 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { - struct opp_table *opp_table; - struct dev_pm_opp *opp; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -544,18 +557,14 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, return ERR_CAST(opp_table); } - opp = _opp_table_find_key(opp_table, key, index, available, read, - compare, assert); - - dev_pm_opp_put_opp_table(opp_table); - - return opp; + return 
_opp_table_find_key(opp_table, key, index, available, read, + compare, assert); } static struct dev_pm_opp *_find_key_exact(struct device *dev, unsigned long key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { /* * The value of key will be updated here, but will be ignored as the @@ -568,7 +577,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev, static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _opp_table_find_key(opp_table, key, index, available, read, _compare_ceil, assert); @@ -577,7 +586,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table, static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _find_key(dev, key, index, available, read, _compare_ceil, assert); @@ -586,7 +595,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key, static struct dev_pm_opp *_find_key_floor(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _find_key(dev, key, index, available, read, _compare_floor, assert); @@ -647,7 +656,8 @@ struct dev_pm_opp * dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, u32 index, bool available) { - return _find_key_exact(dev, freq, index, available, _read_freq, NULL); + return _find_key_exact(dev, freq, index, available, _read_freq, + assert_clk_index); } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed); @@ -707,7 +717,8 @@ struct dev_pm_opp * dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, u32 index) { - return _find_key_ceil(dev, freq, index, true, _read_freq, NULL); + return _find_key_ceil(dev, freq, index, true, _read_freq, + assert_clk_index); } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed); @@ -760,7 +771,7 @@ struct dev_pm_opp * dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, u32 index) { - return _find_key_floor(dev, freq, index, true, _read_freq, NULL); + return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index); } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed); @@ -878,7 +889,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, unsigned long temp = *bw; struct dev_pm_opp *opp; - opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL); + opp = _find_key_ceil(dev, &temp, index, true, _read_bw, + assert_bandwidth_index); *bw = temp; return opp; } @@ -909,7 +921,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, unsigned long temp = *bw; struct dev_pm_opp *opp; - opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL); + opp = _find_key_floor(dev, &temp, index, true, _read_bw, + assert_bandwidth_index); *bw = temp; return opp; } @@ -1061,6 +1074,27 @@ static int _set_opp_bw(const struct 
opp_table *opp_table, return 0; } +static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp) +{ + unsigned int level = 0; + int ret = 0; + + if (opp) { + if (opp->level == OPP_LEVEL_UNSET) + return 0; + + level = opp->level; + } + + /* Request a new performance state through the device's PM domain. */ + ret = dev_pm_domain_set_performance_state(dev, level); + if (ret) + dev_err(dev, "Failed to set performance state %u (%d)\n", level, + ret); + + return ret; +} + /* This is only called for PM domain for now */ static int _set_required_opps(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp, bool up) @@ -1091,7 +1125,7 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table, if (devs[index]) { required_opp = opp ? opp->required_opps[index] : NULL; - ret = dev_pm_opp_set_opp(devs[index], required_opp); + ret = _set_opp_level(devs[index], required_opp); if (ret) return ret; } @@ -1102,28 +1136,6 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table, return 0; } -static int _set_opp_level(struct device *dev, struct opp_table *opp_table, - struct dev_pm_opp *opp) -{ - unsigned int level = 0; - int ret = 0; - - if (opp) { - if (opp->level == OPP_LEVEL_UNSET) - return 0; - - level = opp->level; - } - - /* Request a new performance state through the device's PM domain. */ - ret = dev_pm_domain_set_performance_state(dev, level); - if (ret) - dev_err(dev, "Failed to set performance state %u (%d)\n", level, - ret); - - return ret; -} - static void _find_current_opp(struct device *dev, struct opp_table *opp_table) { struct dev_pm_opp *opp = ERR_PTR(-ENODEV); @@ -1140,10 +1152,9 @@ static void _find_current_opp(struct device *dev, struct opp_table *opp_table) * make special checks to validate current_opp. 
*/ if (IS_ERR(opp)) { - mutex_lock(&opp_table->lock); - opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node); - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + guard(mutex)(&opp_table->lock); + opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list, + struct dev_pm_opp, node)); } opp_table->current_opp = opp; @@ -1171,7 +1182,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) if (opp_table->regulators) regulator_disable(opp_table->regulators[0]); - ret = _set_opp_level(dev, opp_table, NULL); + ret = _set_opp_level(dev, NULL); if (ret) goto out; @@ -1220,7 +1231,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } - ret = _set_opp_level(dev, opp_table, opp); + ret = _set_opp_level(dev, opp); if (ret) return ret; @@ -1267,7 +1278,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } - ret = _set_opp_level(dev, opp_table, opp); + ret = _set_opp_level(dev, opp); if (ret) return ret; @@ -1282,8 +1293,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, dev_pm_opp_put(old_opp); /* Make sure current_opp doesn't get freed */ - dev_pm_opp_get(opp); - opp_table->current_opp = opp; + opp_table->current_opp = dev_pm_opp_get(opp); return ret; } @@ -1301,11 +1311,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, */ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); + struct dev_pm_opp *opp __free(put_opp) = NULL; unsigned long freq = 0, temp_freq; - struct dev_pm_opp *opp = NULL; bool forced = false; - int ret; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -1322,9 +1331,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * equivalent to a clk_set_rate() */ if (!_get_opp_count(opp_table)) { - ret = opp_table->config_clks(dev, opp_table, NULL, - &target_freq, false); - goto put_opp_table; + return opp_table->config_clks(dev, opp_table, NULL, + &target_freq, false); } freq = clk_round_rate(opp_table->clk, target_freq); @@ -1339,10 +1347,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) temp_freq = freq; opp = _find_freq_ceil(opp_table, &temp_freq); if (IS_ERR(opp)) { - ret = PTR_ERR(opp); - dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", - __func__, freq, ret); - goto put_opp_table; + dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n", + __func__, freq, PTR_ERR(opp)); + return PTR_ERR(opp); } /* @@ -1355,14 +1362,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) forced = opp_table->current_rate_single_clk != freq; } - ret = _set_opp(dev, opp_table, opp, &freq, forced); - - if (freq) - dev_pm_opp_put(opp); - -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - return ret; + return _set_opp(dev, opp_table, opp, &freq, forced); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); @@ -1378,8 +1378,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); */ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -1387,10 +1386,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) return PTR_ERR(opp_table); } - ret = _set_opp(dev, opp_table, opp, NULL, false); - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return _set_opp(dev, opp_table, opp, NULL, 
false); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); @@ -1415,9 +1411,8 @@ struct opp_device *_add_opp_dev(const struct device *dev, /* Initialize opp-dev */ opp_dev->dev = dev; - mutex_lock(&opp_table->lock); - list_add(&opp_dev->node, &opp_table->dev_list); - mutex_unlock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) + list_add(&opp_dev->node, &opp_table->dev_list); /* Create debugfs entries for the opp_table */ opp_debug_register(opp_dev, opp_table); @@ -1481,11 +1476,6 @@ err: return ERR_PTR(ret); } -void _get_opp_table_kref(struct opp_table *opp_table) -{ - kref_get(&opp_table->kref); -} - static struct opp_table *_update_opp_table_clk(struct device *dev, struct opp_table *opp_table, bool getclk) @@ -1646,6 +1636,13 @@ static void _opp_table_kref_release(struct kref *kref) kfree(opp_table); } +struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) +{ + kref_get(&opp_table->kref); + return opp_table; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref); + void dev_pm_opp_put_opp_table(struct opp_table *opp_table) { kref_put_mutex(&opp_table->kref, _opp_table_kref_release, @@ -1676,10 +1673,12 @@ static void _opp_kref_release(struct kref *kref) kfree(opp); } -void dev_pm_opp_get(struct dev_pm_opp *opp) +struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp) { kref_get(&opp->kref); + return opp; } +EXPORT_SYMBOL_GPL(dev_pm_opp_get); void dev_pm_opp_put(struct dev_pm_opp *opp) { @@ -1696,27 +1695,25 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put); */ void dev_pm_opp_remove(struct device *dev, unsigned long freq) { + struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp = NULL, *iter; - struct opp_table *opp_table; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; - if (!assert_single_clk(opp_table)) - goto put_table; - - mutex_lock(&opp_table->lock); + if (!assert_single_clk(opp_table, 0)) + return; - list_for_each_entry(iter, &opp_table->opp_list, node) { - if (iter->rates[0] == freq) { - opp = iter; - break; + scoped_guard(mutex, &opp_table->lock) { + list_for_each_entry(iter, &opp_table->opp_list, node) { + if (iter->rates[0] == freq) { + opp = iter; + break; + } } } - mutex_unlock(&opp_table->lock); - if (opp) { dev_pm_opp_put(opp); @@ -1726,32 +1723,26 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", __func__, freq); } - -put_table: - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, bool dynamic) { - struct dev_pm_opp *opp = NULL, *temp; + struct dev_pm_opp *opp; + + guard(mutex)(&opp_table->lock); - mutex_lock(&opp_table->lock); - list_for_each_entry(temp, &opp_table->opp_list, node) { + list_for_each_entry(opp, &opp_table->opp_list, node) { /* * Refcount must be dropped only once for each OPP by OPP core, * do that with help of "removed" flag. 
*/ - if (!temp->removed && dynamic == temp->dynamic) { - opp = temp; - break; - } + if (!opp->removed && dynamic == opp->dynamic) + return opp; } - mutex_unlock(&opp_table->lock); - return opp; + return NULL; } /* @@ -1775,20 +1766,14 @@ static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) bool _opp_remove_all_static(struct opp_table *opp_table) { - mutex_lock(&opp_table->lock); - - if (!opp_table->parsed_static_opps) { - mutex_unlock(&opp_table->lock); - return false; - } + scoped_guard(mutex, &opp_table->lock) { + if (!opp_table->parsed_static_opps) + return false; - if (--opp_table->parsed_static_opps) { - mutex_unlock(&opp_table->lock); - return true; + if (--opp_table->parsed_static_opps) + return true; } - mutex_unlock(&opp_table->lock); - _opp_remove_all(opp_table, false); return true; } @@ -1801,16 +1786,13 @@ bool _opp_remove_all_static(struct opp_table *opp_table) */ void dev_pm_opp_remove_all_dynamic(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; _opp_remove_all(opp_table, true); - - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); @@ -1995,17 +1977,15 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct list_head *head; int ret; - mutex_lock(&opp_table->lock); - head = &opp_table->opp_list; + scoped_guard(mutex, &opp_table->lock) { + head = &opp_table->opp_list; - ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); - if (ret) { - mutex_unlock(&opp_table->lock); - return ret; - } + ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); + if (ret) + return ret; - list_add(&new_opp->node, head); - mutex_unlock(&opp_table->lock); + list_add(&new_opp->node, head); + } new_opp->opp_table = opp_table; kref_init(&new_opp->kref); @@ -2055,7 +2035,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long tol, u_volt = data->u_volt; int ret; - if (!assert_single_clk(opp_table)) + if (!assert_single_clk(opp_table, 0)) return -EINVAL; new_opp = _opp_allocate(opp_table); @@ -2065,6 +2045,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev, /* populate the opp table */ new_opp->rates[0] = data->freq; new_opp->level = data->level; + new_opp->turbo = data->turbo; tol = u_volt * opp_table->voltage_tolerance_v1 / 100; new_opp->supplies[0].u_volt = u_volt; new_opp->supplies[0].u_volt_min = u_volt - tol; @@ -2106,8 +2087,8 @@ static int _opp_set_supported_hw(struct opp_table *opp_table, if (opp_table->supported_hw) return 0; - opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), - GFP_KERNEL); + opp_table->supported_hw = kmemdup_array(versions, count, + sizeof(*versions), GFP_KERNEL); if (!opp_table->supported_hw) return -ENOMEM; @@ -2360,47 +2341,13 @@ static void _opp_put_config_regulators_helper(struct opp_table *opp_table) opp_table->config_regulators = NULL; } -static void _opp_detach_genpd(struct opp_table *opp_table) +static int _opp_set_required_dev(struct opp_table *opp_table, + struct device *dev, + struct device *required_dev, + unsigned int index) { - int index; - - for (index = 0; index < opp_table->required_opp_count; index++) { - if (!opp_table->required_devs[index]) - continue; - - dev_pm_domain_detach(opp_table->required_devs[index], false); - opp_table->required_devs[index] = NULL; - } -} - -/* - * Multiple generic power domains for a device are supported with the 
help of - * virtual genpd devices, which are created for each consumer device - genpd - * pair. These are the device structures which are attached to the power domain - * and are required by the OPP core to set the performance state of the genpd. - * The same API also works for the case where single genpd is available and so - * we don't need to support that separately. - * - * This helper will normally be called by the consumer driver of the device - * "dev", as only that has details of the genpd names. - * - * This helper needs to be called once with a list of all genpd to attach. - * Otherwise the original device structure will be used instead by the OPP core. - * - * The order of entries in the names array must match the order in which - * "required-opps" are added in DT. - */ -static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, - const char * const *names, struct device ***virt_devs) -{ - struct device *virt_dev; - int index = 0, ret = -EINVAL; - const char * const *name = names; - - if (!opp_table->required_devs) { - dev_err(dev, "Required OPPs not available, can't attach genpd\n"); - return -EINVAL; - } + struct opp_table *required_table, *pd_table; + struct device *gdev; /* Genpd core takes care of propagation to parent genpd */ if (opp_table->is_genpd) { @@ -2408,96 +2355,59 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, return -EOPNOTSUPP; } - /* Checking only the first one is enough ? */ - if (opp_table->required_devs[0]) - return 0; - - while (*name) { - if (index >= opp_table->required_opp_count) { - dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n", - *name, opp_table->required_opp_count, index); - goto err; - } - - virt_dev = dev_pm_domain_attach_by_name(dev, *name); - if (IS_ERR_OR_NULL(virt_dev)) { - ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV; - dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret); - goto err; - } - - /* - * Add the virtual genpd device as a user of the OPP table, so - * we can call dev_pm_opp_set_opp() on it directly. - * - * This will be automatically removed when the OPP table is - * removed, don't need to handle that here. - */ - if (!_add_opp_dev(virt_dev, opp_table->required_opp_tables[index])) { - ret = -ENOMEM; - goto err; - } - - opp_table->required_devs[index] = virt_dev; - index++; - name++; - } - - if (virt_devs) - *virt_devs = opp_table->required_devs; - - return 0; - -err: - _opp_detach_genpd(opp_table); - return ret; - -} - -static int _opp_set_required_devs(struct opp_table *opp_table, - struct device *dev, - struct device **required_devs) -{ - int i; - - if (!opp_table->required_devs) { + if (index >= opp_table->required_opp_count) { dev_err(dev, "Required OPPs not available, can't set required devs\n"); return -EINVAL; } - /* Another device that shares the OPP table has set the required devs ? 
*/ - if (opp_table->required_devs[0]) - return 0; + required_table = opp_table->required_opp_tables[index]; + if (IS_ERR(required_table)) { + dev_err(dev, "Missing OPP table, unable to set the required devs\n"); + return -ENODEV; + } - for (i = 0; i < opp_table->required_opp_count; i++) { - /* Genpd core takes care of propagation to parent genpd */ - if (required_devs[i] && opp_table->is_genpd && - opp_table->required_opp_tables[i]->is_genpd) { - dev_err(dev, "%s: Operation not supported for genpds\n", __func__); - return -EOPNOTSUPP; + /* + * The required_opp_tables parsing is not perfect, as the OPP core does + * the parsing solely based on the DT node pointers. The core sets the + * required_opp_tables entry to the first OPP table in the "opp_tables" + * list, that matches with the node pointer. + * + * If the target DT OPP table is used by multiple devices and they all + * create separate instances of 'struct opp_table' from it, then it is + * possible that the required_opp_tables entry may be set to the + * incorrect sibling device. + * + * Cross check it again and fix if required. + */ + gdev = dev_to_genpd_dev(required_dev); + if (IS_ERR(gdev)) + return PTR_ERR(gdev); + + pd_table = _find_opp_table(gdev); + if (!IS_ERR(pd_table)) { + if (pd_table != required_table) { + dev_pm_opp_put_opp_table(required_table); + opp_table->required_opp_tables[index] = pd_table; + } else { + dev_pm_opp_put_opp_table(pd_table); } - - opp_table->required_devs[i] = required_devs[i]; } + opp_table->required_devs[index] = required_dev; return 0; } -static void _opp_put_required_devs(struct opp_table *opp_table) +static void _opp_put_required_dev(struct opp_table *opp_table, + unsigned int index) { - int i; - - for (i = 0; i < opp_table->required_opp_count; i++) - opp_table->required_devs[i] = NULL; + opp_table->required_devs[index] = NULL; } static void _opp_clear_config(struct opp_config_data *data) { - if (data->flags & OPP_CONFIG_REQUIRED_DEVS) - _opp_put_required_devs(data->opp_table); - else if (data->flags & OPP_CONFIG_GENPD) - _opp_detach_genpd(data->opp_table); - + if (data->flags & OPP_CONFIG_REQUIRED_DEV) + _opp_put_required_dev(data->opp_table, + data->required_dev_index); if (data->flags & OPP_CONFIG_REGULATOR) _opp_put_regulators(data->opp_table); if (data->flags & OPP_CONFIG_SUPPORTED_HW) @@ -2609,24 +2519,15 @@ int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) data->flags |= OPP_CONFIG_REGULATOR; } - /* Attach genpds */ - if (config->genpd_names) { - if (config->required_devs) - goto err; - - ret = _opp_attach_genpd(opp_table, dev, config->genpd_names, - config->virt_devs); - if (ret) - goto err; - - data->flags |= OPP_CONFIG_GENPD; - } else if (config->required_devs) { - ret = _opp_set_required_devs(opp_table, dev, - config->required_devs); + if (config->required_dev) { + ret = _opp_set_required_dev(opp_table, dev, + config->required_dev, + config->required_dev_index); if (ret) goto err; - data->flags |= OPP_CONFIG_REQUIRED_DEVS; + data->required_dev_index = config->required_dev_index; + data->flags |= OPP_CONFIG_REQUIRED_DEV; } ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX), @@ -2731,18 +2632,16 @@ struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, return ERR_PTR(-EBUSY); for (i = 0; i < src_table->required_opp_count; i++) { - if (src_table->required_opp_tables[i] == dst_table) { - mutex_lock(&src_table->lock); + if (src_table->required_opp_tables[i] != dst_table) + continue; + scoped_guard(mutex, &src_table->lock) { 
list_for_each_entry(opp, &src_table->opp_list, node) { if (opp == src_opp) { - dest_opp = opp->required_opps[i]; - dev_pm_opp_get(dest_opp); + dest_opp = dev_pm_opp_get(opp->required_opps[i]); break; } } - - mutex_unlock(&src_table->lock); break; } } @@ -2774,7 +2673,6 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, unsigned int pstate) { struct dev_pm_opp *opp; - int dest_pstate = -EINVAL; int i; /* @@ -2808,22 +2706,17 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, return -EINVAL; } - mutex_lock(&src_table->lock); + guard(mutex)(&src_table->lock); list_for_each_entry(opp, &src_table->opp_list, node) { - if (opp->level == pstate) { - dest_pstate = opp->required_opps[i]->level; - goto unlock; - } + if (opp->level == pstate) + return opp->required_opps[i]->level; } pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table, dst_table); -unlock: - mutex_unlock(&src_table->lock); - - return dest_pstate; + return -EINVAL; } /** @@ -2878,46 +2771,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic); static int _opp_set_availability(struct device *dev, unsigned long freq, bool availability_req) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; + struct opp_table *opp_table __free(put_opp_table); /* Find the opp_table */ opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - r = PTR_ERR(opp_table); - dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); - return r; + dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__, + PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - if (!assert_single_clk(opp_table)) { - r = -EINVAL; - goto put_table; - } + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; - mutex_lock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) { + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { + if (tmp_opp->rates[0] == freq) { + opp = dev_pm_opp_get(tmp_opp); - /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { - if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; - break; - } - } + /* Is update really needed? */ + if (opp->available == availability_req) + return 0; - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto unlock; + opp->available = availability_req; + break; + } + } } - /* Is update really needed? 
*/ - if (opp->available == availability_req) - goto unlock; - - opp->available = availability_req; - - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); /* Notify the change of the OPP availability */ if (availability_req) @@ -2927,14 +2812,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE, opp); - dev_pm_opp_put(opp); - goto put_table; - -unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } /** @@ -2954,9 +2832,9 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, unsigned long u_volt_max) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; + struct opp_table *opp_table __free(put_opp_table); + int r; /* Find the opp_table */ opp_table = _find_opp_table(dev); @@ -2966,49 +2844,36 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, return r; } - if (!assert_single_clk(opp_table)) { - r = -EINVAL; - goto put_table; - } + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; - mutex_lock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) { + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { + if (tmp_opp->rates[0] == freq) { + opp = dev_pm_opp_get(tmp_opp); - /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { - if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; - break; - } - } + /* Is update really needed? */ + if (opp->supplies->u_volt == u_volt) + return 0; - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto adjust_unlock; - } - - /* Is update really needed? 
*/ - if (opp->supplies->u_volt == u_volt) - goto adjust_unlock; + opp->supplies->u_volt = u_volt; + opp->supplies->u_volt_min = u_volt_min; + opp->supplies->u_volt_max = u_volt_max; - opp->supplies->u_volt = u_volt; - opp->supplies->u_volt_min = u_volt_min; - opp->supplies->u_volt_max = u_volt_max; + break; + } + } + } - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); /* Notify the voltage change of the OPP */ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE, opp); - dev_pm_opp_put(opp); - goto put_table; - -adjust_unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); @@ -3022,9 +2887,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); */ int dev_pm_opp_sync_regulators(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); struct regulator *reg; - int i, ret = 0; + int ret, i; /* Device may not have OPP table */ opp_table = _find_opp_table(dev); @@ -3033,23 +2898,20 @@ int dev_pm_opp_sync_regulators(struct device *dev) /* Regulator may not be required for the device */ if (unlikely(!opp_table->regulators)) - goto put_table; + return 0; /* Nothing to sync if voltage wasn't changed */ if (!opp_table->enabled) - goto put_table; + return 0; for (i = 0; i < opp_table->regulator_count; i++) { reg = opp_table->regulators[i]; ret = regulator_sync_voltage(reg); if (ret) - break; + return ret; } -put_table: - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); @@ -3101,18 +2963,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable); */ int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_register(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_register(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_register_notifier); @@ -3126,18 +2983,13 @@ EXPORT_SYMBOL(dev_pm_opp_register_notifier); int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_unregister(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_unregister(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); @@ -3150,7 +3002,7 @@ EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); */ void dev_pm_opp_remove_table(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); /* Check for existing table for 'dev' */ opp_table = _find_opp_table(dev); @@ -3171,8 +3023,5 @@ void dev_pm_opp_remove_table(struct device *dev) **/ if (_opp_remove_all_static(opp_table)) dev_pm_opp_put_opp_table(opp_table); - - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c index 12c429b407ca..97989d4fe336 100644 --- a/drivers/opp/cpu.c +++ 
b/drivers/opp/cpu.c @@ -43,7 +43,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **opp_table) { - struct dev_pm_opp *opp; struct cpufreq_frequency_table *freq_table = NULL; int i, max_opps, ret = 0; unsigned long rate; @@ -57,6 +56,8 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, return -ENOMEM; for (i = 0, rate = 0; i < max_opps; i++, rate++) { + struct dev_pm_opp *opp __free(put_opp); + /* find next rate */ opp = dev_pm_opp_find_freq_ceil(dev, &rate); if (IS_ERR(opp)) { @@ -69,8 +70,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, /* Is Boost/turbo opp ? */ if (dev_pm_opp_is_turbo(opp)) freq_table[i].flags = CPUFREQ_BOOST_FREQ; - - dev_pm_opp_put(opp); } freq_table[i].driver_data = i; @@ -155,10 +154,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { + struct opp_table *opp_table __free(put_opp_table); struct opp_device *opp_dev; - struct opp_table *opp_table; struct device *dev; - int cpu, ret = 0; + int cpu; opp_table = _find_opp_table(cpu_dev); if (IS_ERR(opp_table)) @@ -186,9 +185,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; } - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); @@ -204,33 +201,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); */ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { + struct opp_table *opp_table __free(put_opp_table); struct opp_device *opp_dev; - struct opp_table *opp_table; - int ret = 0; opp_table = _find_opp_table(cpu_dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { - ret = -EINVAL; - goto put_opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) + return -EINVAL; cpumask_clear(cpumask); if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp_dev, &opp_table->dev_list, node) cpumask_set_cpu(opp_dev->dev->id, cpumask); - mutex_unlock(&opp_table->lock); } else { cpumask_set_cpu(cpu_dev->id, cpumask); } -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus); diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index ec030b19164a..8fc6238b1728 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf, size_t count, loff_t *ppos) { struct icc_path *path = fp->private_data; + const char *name = icc_get_name(path); char buf[64]; - int i; + int i = 0; - i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path)); + if (name) + i = scnprintf(buf, sizeof(buf), "%.62s\n", name); return simple_read_from_buffer(userbuf, count, ppos, buf, i); } @@ -56,11 +58,11 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp, struct dentry *pdentry) { struct dentry *d; - char name[20]; + char name[] = "icc-path-XXXXXXXXXXX"; /* Integers can take 11 chars max */ int i; for (i = 0; i < opp_table->path_count; i++) { - snprintf(name, sizeof(name), "icc-path-%.1d", i); + snprintf(name, sizeof(name), "icc-path-%d", i); /* Create per-path directory */ d = debugfs_create_dir(name, pdentry); @@ -78,7 +80,7 @@ static void opp_debug_create_clks(struct dev_pm_opp *opp, struct opp_table *opp_table, struct dentry *pdentry) 
{ - char name[12]; + char name[] = "rate_hz_XXXXXXXXXXX"; /* Integers can take 11 chars max */ int i; if (opp_table->clk_count == 1) { @@ -100,7 +102,7 @@ static void opp_debug_create_supplies(struct dev_pm_opp *opp, int i; for (i = 0; i < opp_table->regulator_count; i++) { - char name[15]; + char name[] = "supply-XXXXXXXXXXX"; /* Integers can take 11 chars max */ snprintf(name, sizeof(name), "supply-%d", i); @@ -215,7 +217,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev, { struct opp_device *new_dev = NULL, *iter; const struct device *dev; - struct dentry *dentry; + int err; /* Look for next opp-dev */ list_for_each_entry(iter, &opp_table->dev_list, node) @@ -232,16 +234,14 @@ static void opp_migrate_dentry(struct opp_device *opp_dev, opp_set_dev_name(dev, opp_table->dentry_name); - dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir, - opp_table->dentry_name); - if (IS_ERR(dentry)) { + err = debugfs_change_name(opp_dev->dentry, "%s", opp_table->dentry_name); + if (err) { dev_err(dev, "%s: Failed to rename link from: %s to %s\n", __func__, dev_name(opp_dev->dev), dev_name(dev)); return; } - new_dev->dentry = dentry; - opp_table->dentry = dentry; + new_dev->dentry = opp_table->dentry = opp_dev->dentry; } /** diff --git a/drivers/opp/of.c b/drivers/opp/of.c index f9f0b22bccbb..505d79821584 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); struct opp_table *_managed_opp(struct device *dev, int index) { struct opp_table *opp_table, *managed_table = NULL; - struct device_node *np; + struct device_node *np __free(device_node); np = _opp_of_get_opp_desc_node(dev->of_node, index); if (!np) @@ -60,17 +60,13 @@ struct opp_table *_managed_opp(struct device *dev, int index) * But the OPPs will be considered as shared only if the * OPP table contains a "opp-shared" property. 
*/ - if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { - _get_opp_table_kref(opp_table); - managed_table = opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) + managed_table = dev_pm_opp_get_opp_table_ref(opp_table); break; } } - of_node_put(np); - return managed_table; } @@ -80,18 +76,13 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, { struct dev_pm_opp *opp; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp, &opp_table->opp_list, node) { - if (opp->np == opp_np) { - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); - return opp; - } + if (opp->np == opp_np) + return dev_pm_opp_get(opp); } - mutex_unlock(&opp_table->lock); - return NULL; } @@ -104,27 +95,20 @@ static struct device_node *of_parse_required_opp(struct device_node *np, /* The caller must call dev_pm_opp_put_opp_table() after the table is used */ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np) { + struct device_node *opp_table_np __free(device_node); struct opp_table *opp_table; - struct device_node *opp_table_np; opp_table_np = of_get_parent(opp_np); if (!opp_table_np) - goto err; + return ERR_PTR(-ENODEV); - /* It is safe to put the node now as all we need now is its address */ - of_node_put(opp_table_np); + guard(mutex)(&opp_table_lock); - mutex_lock(&opp_table_lock); list_for_each_entry(opp_table, &opp_tables, node) { - if (opp_table_np == opp_table->np) { - _get_opp_table_kref(opp_table); - mutex_unlock(&opp_table_lock); - return opp_table; - } + if (opp_table_np == opp_table->np) + return dev_pm_opp_get_opp_table_ref(opp_table); } - mutex_unlock(&opp_table_lock); -err: return ERR_PTR(-ENODEV); } @@ -149,9 +133,8 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table) opp_table->required_opp_count = 0; opp_table->required_opp_tables = NULL; - mutex_lock(&opp_table_lock); + guard(mutex)(&opp_table_lock); list_del(&opp_table->lazy); - mutex_unlock(&opp_table_lock); } /* @@ -163,7 +146,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, struct device_node *opp_np) { struct opp_table **required_opp_tables; - struct device_node *required_np, *np; + struct device_node *np __free(device_node); bool lazy = false; int count, i, size; @@ -171,30 +154,32 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, np = of_get_next_available_child(opp_np, NULL); if (!np) { dev_warn(dev, "Empty OPP table\n"); - return; } count = of_count_phandle_with_args(np, "required-opps", NULL); if (count <= 0) - goto put_np; + return; size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs); required_opp_tables = kcalloc(count, size, GFP_KERNEL); if (!required_opp_tables) - goto put_np; + return; opp_table->required_opp_tables = required_opp_tables; opp_table->required_devs = (void *)(required_opp_tables + count); opp_table->required_opp_count = count; for (i = 0; i < count; i++) { + struct device_node *required_np __free(device_node); + required_np = of_parse_required_opp(np, i); - if (!required_np) - goto free_required_tables; + if (!required_np) { + _opp_table_free_required_tables(opp_table); + return; + } required_opp_tables[i] = _find_table_of_opp_np(required_np); - of_node_put(required_np); if (IS_ERR(required_opp_tables[i])) lazy = true; @@ -206,23 +191,15 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, * The OPP table is not held while allocating the table, take it * now to avoid corruption to the 
lazy_opp_tables list. */ - mutex_lock(&opp_table_lock); + guard(mutex)(&opp_table_lock); list_add(&opp_table->lazy, &lazy_opp_tables); - mutex_unlock(&opp_table_lock); } - - goto put_np; - -free_required_tables: - _opp_table_free_required_tables(opp_table); -put_np: - of_node_put(np); } void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) { - struct device_node *np, *opp_np; + struct device_node *np __free(device_node), *opp_np; u32 val; /* @@ -243,8 +220,6 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, /* Get OPP table node */ opp_np = _opp_of_get_opp_desc_node(np, index); - of_node_put(np); - if (!opp_np) return; @@ -295,57 +270,22 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) of_node_put(opp->np); } -static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_table, +static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *required_table, int index) { - struct device_node *np; + struct device_node *np __free(device_node); np = of_parse_required_opp(opp->np, index); if (unlikely(!np)) return -ENODEV; opp->required_opps[index] = _find_opp_of_np(required_table, np); - of_node_put(np); - if (!opp->required_opps[index]) { pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", __func__, opp->np, index); return -ENODEV; } - /* - * There are two genpd (as required-opp) cases that we need to handle, - * devices with a single genpd and ones with multiple genpds. - * - * The single genpd case requires special handling as we need to use the - * same `dev` structure (instead of a virtual one provided by genpd - * core) for setting the performance state. - * - * It doesn't make sense for a device's DT entry to have both - * "opp-level" and single "required-opps" entry pointing to a genpd's - * OPP, as that would make the OPP core call - * dev_pm_domain_set_performance_state() for two different values for - * the same device structure. Lets treat single genpd configuration as a - * case where the OPP's level is directly available without required-opp - * link in the DT. - * - * Just update the `level` with the right value, which - * dev_pm_opp_set_opp() will take care of in the normal path itself. - * - * There is another case though, where a genpd's OPP table has - * required-opps set to a parent genpd. The OPP core expects the user to - * set the respective required `struct device` pointer via - * dev_pm_opp_set_config(). 
- */ - if (required_table->is_genpd && opp_table->required_opp_count == 1 && - !opp_table->required_devs[0]) { - /* Genpd core takes care of propagation to parent genpd */ - if (!opp_table->is_genpd) { - if (!WARN_ON(opp->level != OPP_LEVEL_UNSET)) - opp->level = opp->required_opps[0]->level; - } - } - return 0; } @@ -370,7 +310,7 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table, if (IS_ERR_OR_NULL(required_table)) continue; - ret = _link_required_opps(opp, opp_table, required_table, i); + ret = _link_required_opps(opp, required_table, i); if (ret) goto free_required_opps; } @@ -391,7 +331,7 @@ static int lazy_link_required_opps(struct opp_table *opp_table, int ret; list_for_each_entry(opp, &opp_table->opp_list, node) { - ret = _link_required_opps(opp, opp_table, new_table, index); + ret = _link_required_opps(opp, new_table, index); if (ret) return ret; } @@ -403,19 +343,22 @@ static int lazy_link_required_opps(struct opp_table *opp_table, static void lazy_link_required_opp_table(struct opp_table *new_table) { struct opp_table *opp_table, *temp, **required_opp_tables; - struct device_node *required_np, *opp_np, *required_table_np; struct dev_pm_opp *opp; int i, ret; - mutex_lock(&opp_table_lock); + guard(mutex)(&opp_table_lock); list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) { + struct device_node *opp_np __free(device_node); bool lazy = false; /* opp_np can't be invalid here */ opp_np = of_get_next_available_child(opp_table->np, NULL); for (i = 0; i < opp_table->required_opp_count; i++) { + struct device_node *required_np __free(device_node) = NULL; + struct device_node *required_table_np __free(device_node) = NULL; + required_opp_tables = opp_table->required_opp_tables; /* Required opp-table is already parsed */ @@ -426,9 +369,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) required_np = of_parse_required_opp(opp_np, i); required_table_np = of_get_parent(required_np); - of_node_put(required_table_np); - of_node_put(required_np); - /* * Newly added table isn't the required opp-table for * opp_table. 
@@ -438,8 +378,7 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
 			continue;
 		}
 
-		required_opp_tables[i] = new_table;
-		_get_opp_table_kref(new_table);
+		required_opp_tables[i] = dev_pm_opp_get_opp_table_ref(new_table);
 
 		/* Link OPPs now */
 		ret = lazy_link_required_opps(opp_table, new_table, i);
@@ -450,8 +389,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
 		}
 	}
 
-	of_node_put(opp_np);
-
 	/* All required opp-tables found, remove from lazy list */
 	if (!lazy) {
 		list_del_init(&opp_table->lazy);
@@ -460,22 +397,22 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
 			_required_opps_available(opp, opp_table->required_opp_count);
 		}
 	}
-
-	mutex_unlock(&opp_table_lock);
 }
 
 static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
 {
-	struct device_node *np, *opp_np;
+	struct device_node *opp_np __free(device_node) = NULL;
+	struct device_node *np __free(device_node) = NULL;
 	struct property *prop;
 
 	if (!opp_table) {
+		struct device_node *np __free(device_node);
+
 		np = of_node_get(dev->of_node);
 		if (!np)
 			return -ENODEV;
 
 		opp_np = _opp_of_get_opp_desc_node(np, 0);
-		of_node_put(np);
 	} else {
 		opp_np = of_node_get(opp_table->np);
 	}
@@ -486,15 +423,12 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
 
 	/* Checking only first OPP is sufficient */
 	np = of_get_next_available_child(opp_np, NULL);
-	of_node_put(opp_np);
 	if (!np) {
 		dev_err(dev, "OPP table empty\n");
 		return -EINVAL;
 	}
 
 	prop = of_find_property(np, "opp-peak-kBps", NULL);
-	of_node_put(np);
-
 	if (!prop || !prop->length)
 		return 0;
 
@@ -504,7 +438,7 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
 				 struct opp_table *opp_table)
 {
-	struct device_node *np;
+	struct device_node *np __free(device_node) = of_node_get(dev->of_node);
 	int ret, i, count, num_paths;
 	struct icc_path **paths;
 
@@ -514,15 +448,13 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
 	else if (ret <= 0)
 		return ret;
 
-	ret = 0;
-
-	np = of_node_get(dev->of_node);
 	if (!np)
 		return 0;
 
+	ret = 0;
+
 	count = of_count_phandle_with_args(np, "interconnects",
 					   "#interconnect-cells");
-	of_node_put(np);
 	if (count < 0)
 		return 0;
 
@@ -959,7 +891,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
 
 	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
 	if (ret)
-		goto free_opp;
+		goto put_node;
 
 	if (!of_property_read_u32(np, "clock-latency-ns", &val))
 		new_opp->clock_latency_ns = val;
@@ -1009,6 +941,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
 
 free_required_opps:
 	_of_opp_free_required_opps(opp_table, new_opp);
+put_node:
+	of_node_put(np);
 free_opp:
 	_opp_free(new_opp);
 
@@ -1023,15 +957,14 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
 	struct dev_pm_opp *opp;
 
 	/* OPP table is already initialized for the device */
-	mutex_lock(&opp_table->lock);
-	if (opp_table->parsed_static_opps) {
-		opp_table->parsed_static_opps++;
-		mutex_unlock(&opp_table->lock);
-		return 0;
-	}
+	scoped_guard(mutex, &opp_table->lock) {
+		if (opp_table->parsed_static_opps) {
+			opp_table->parsed_static_opps++;
+			return 0;
+		}
 
-	opp_table->parsed_static_opps = 1;
-	mutex_unlock(&opp_table->lock);
+		opp_table->parsed_static_opps = 1;
+	}
 
 	/* We have opp-table node now, iterate over it and add OPPs */
 	for_each_available_child_of_node(opp_table->np, np) {
@@ -1071,15 +1004,14 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
 	const __be32 *val;
 	int nr, ret = 0;
 
-	mutex_lock(&opp_table->lock);
-	if (opp_table->parsed_static_opps) {
-		opp_table->parsed_static_opps++;
-		mutex_unlock(&opp_table->lock);
-		return 0;
-	}
+	scoped_guard(mutex, &opp_table->lock) {
+		if (opp_table->parsed_static_opps) {
+			opp_table->parsed_static_opps++;
+			return 0;
+		}
 
-	opp_table->parsed_static_opps = 1;
-	mutex_unlock(&opp_table->lock);
+		opp_table->parsed_static_opps = 1;
+	}
 
 	prop = of_find_property(dev->of_node, "operating-points", NULL);
 	if (!prop) {
@@ -1337,8 +1269,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
 				   struct cpumask *cpumask)
 {
-	struct device_node *np, *tmp_np, *cpu_np;
-	int cpu, ret = 0;
+	struct device_node *np __free(device_node);
+	int cpu;
 
 	/* Get OPP descriptor node */
 	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
@@ -1351,9 +1283,12 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
 
 	/* OPPs are shared ? */
 	if (!of_property_read_bool(np, "opp-shared"))
-		goto put_cpu_node;
+		return 0;
 
 	for_each_possible_cpu(cpu) {
+		struct device_node *cpu_np __free(device_node) = NULL;
+		struct device_node *tmp_np __free(device_node) = NULL;
+
 		if (cpu == cpu_dev->id)
 			continue;
 
@@ -1361,29 +1296,22 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
 		if (!cpu_np) {
 			dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
 				__func__, cpu);
-			ret = -ENOENT;
-			goto put_cpu_node;
+			return -ENOENT;
 		}
 
 		/* Get OPP descriptor node */
 		tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
-		of_node_put(cpu_np);
 		if (!tmp_np) {
 			pr_err("%pOF: Couldn't find opp node\n", cpu_np);
-			ret = -ENOENT;
-			goto put_cpu_node;
+			return -ENOENT;
 		}
 
 		/* CPUs are sharing opp node */
 		if (np == tmp_np)
 			cpumask_set_cpu(cpu, cpumask);
-
-		of_node_put(tmp_np);
 	}
 
-put_cpu_node:
-	of_node_put(np);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
@@ -1400,9 +1328,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
  */
 int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
-	struct dev_pm_opp *opp;
-	struct device_node *required_np;
-	struct opp_table *opp_table;
+	struct device_node *required_np __free(device_node);
+	struct opp_table *opp_table __free(put_opp_table) = NULL;
+	struct dev_pm_opp *opp __free(put_opp) = NULL;
 	int pstate = -EINVAL;
 
 	required_np = of_parse_required_opp(np, index);
@@ -1413,13 +1341,13 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
 	if (IS_ERR(opp_table)) {
 		pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
 		       __func__, np, PTR_ERR(opp_table));
-		goto put_required_np;
+		return PTR_ERR(opp_table);
 	}
 
 	/* The OPP tables must belong to a genpd */
 	if (unlikely(!opp_table->is_genpd)) {
 		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
-		goto put_required_np;
+		return -EINVAL;
 	}
 
 	opp = _find_opp_of_np(opp_table, required_np);
@@ -1430,20 +1358,43 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
 		} else {
 			pstate = opp->level;
 		}
-		dev_pm_opp_put(opp);
-
 	}
 
-	dev_pm_opp_put_opp_table(opp_table);
-
-put_required_np:
-	of_node_put(required_np);
-
 	return pstate;
 }
 EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
 
 /**
+ * dev_pm_opp_of_has_required_opp - Find out if a required-opps exists.
+ * @dev: The device to investigate.
+ *
+ * Returns true if the device's node has a "operating-points-v2" property and if
+ * the corresponding node for the opp-table describes opp nodes that uses the
+ * "required-opps" property.
+ *
+ * Return: True if a required-opps is present, else false.
+ */
+bool dev_pm_opp_of_has_required_opp(struct device *dev)
+{
+	struct device_node *np __free(device_node) = NULL, *opp_np __free(device_node);
+	int count;
+
+	opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0);
+	if (!opp_np)
+		return false;
+
+	np = of_get_next_available_child(opp_np, NULL);
+	if (!np) {
+		dev_warn(dev, "Empty OPP table\n");
+		return false;
+	}
+
+	count = of_count_phandle_with_args(np, "required-opps", NULL);
+
+	return count > 0;
+}
+
+/**
  * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
  * @opp: opp for which DT node has to be returned for
  *
@@ -1474,7 +1425,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
 static int __maybe_unused
 _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
 {
-	struct dev_pm_opp *opp;
+	struct dev_pm_opp *opp __free(put_opp);
 	unsigned long opp_freq, opp_power;
 
 	/* Find the right frequency and related OPP */
@@ -1484,7 +1435,6 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
 		return -EINVAL;
 
 	opp_power = dev_pm_opp_get_power(opp);
-	dev_pm_opp_put(opp);
 	if (!opp_power)
 		return -EINVAL;
 
@@ -1494,23 +1444,29 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
 	return 0;
 }
 
-/*
- * Callback function provided to the Energy Model framework upon registration.
+/**
+ * dev_pm_opp_calc_power() - Calculate power value for device with EM
+ * @dev	: Device for which an Energy Model has to be registered
+ * @uW	: New power value that is calculated
+ * @kHz	: Frequency for which the new power is calculated
+ *
  * This computes the power estimated by @dev at @kHz if it is the frequency
  * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
  * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
 * frequency and @uW to the associated power. The power is estimated as
 * P = C * V^2 * f with C being the device's capacitance and V and f
 * respectively the voltage and frequency of the OPP.
+ * It is also used as a callback function provided to the Energy Model
+ * framework upon registration.
 *
 * Returns -EINVAL if the power calculation failed because of missing
 * parameters, 0 otherwise.
 */
-static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
-				     unsigned long *kHz)
+int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
+			  unsigned long *kHz)
 {
-	struct dev_pm_opp *opp;
-	struct device_node *np;
+	struct dev_pm_opp *opp __free(put_opp) = NULL;
+	struct device_node *np __free(device_node);
 	unsigned long mV, Hz;
 	u32 cap;
 	u64 tmp;
@@ -1521,7 +1477,6 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
 		return -EINVAL;
 
 	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
-	of_node_put(np);
 	if (ret)
 		return -EINVAL;
 
@@ -1531,7 +1486,6 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
 		return -EINVAL;
 
 	mV = dev_pm_opp_get_voltage(opp) / 1000;
-	dev_pm_opp_put(opp);
 	if (!mV)
 		return -EINVAL;
 
@@ -1544,23 +1498,19 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power);
 
 static bool _of_has_opp_microwatt_property(struct device *dev)
 {
-	unsigned long power, freq = 0;
-	struct dev_pm_opp *opp;
+	struct dev_pm_opp *opp __free(put_opp);
+	unsigned long freq = 0;
 
 	/* Check if at least one OPP has needed property */
 	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 	if (IS_ERR(opp))
 		return false;
 
-	power = dev_pm_opp_get_power(opp);
-	dev_pm_opp_put(opp);
-	if (!power)
-		return false;
-
-	return true;
+	return !!dev_pm_opp_get_power(opp);
 }
 
 /**
@@ -1576,8 +1526,8 @@ static bool _of_has_opp_microwatt_property(struct device *dev)
  */
 int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
 {
+	struct device_node *np __free(device_node) = NULL;
 	struct em_data_callback em_cb;
-	struct device_node *np;
 	int ret, nr_opp;
 	u32 cap;
 
@@ -1612,14 +1562,13 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
 	 * user about the inconsistent configuration.
 	 */
 	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
-	of_node_put(np);
 	if (ret || !cap) {
 		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
 		ret = -EINVAL;
 		goto failed;
 	}
 
-	EM_SET_ACTIVE_POWER_CB(em_cb, _get_power);
+	EM_SET_ACTIVE_POWER_CB(em_cb, dev_pm_opp_calc_power);
 
 register_em:
 	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index cff1fabd1ae3..9eba63e01a9e 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -34,13 +34,13 @@ extern struct list_head opp_tables;
 #define OPP_CONFIG_REGULATOR_HELPER	BIT(2)
 #define OPP_CONFIG_PROP_NAME		BIT(3)
 #define OPP_CONFIG_SUPPORTED_HW		BIT(4)
-#define OPP_CONFIG_GENPD		BIT(5)
-#define OPP_CONFIG_REQUIRED_DEVS	BIT(6)
+#define OPP_CONFIG_REQUIRED_DEV		BIT(5)
 
 /**
  * struct opp_config_data - data for set config operations
  * @opp_table: OPP table
  * @flags: OPP config flags
+ * @required_dev_index: The position in the array of required_devs
  *
 * This structure stores the OPP config information for each OPP table
 * configuration by the callers.
@@ -48,6 +48,7 @@ extern struct list_head opp_tables;
 struct opp_config_data {
 	struct opp_table *opp_table;
 	unsigned int flags;
+	unsigned int required_dev_index;
 };
 
 /**
@@ -249,9 +250,7 @@ struct opp_table {
 };
 
 /* Routines internal to opp core */
-void dev_pm_opp_get(struct dev_pm_opp *opp);
 bool _opp_remove_all_static(struct opp_table *opp_table);
-void _get_opp_table_kref(struct opp_table *opp_table);
 int _get_opp_count(struct opp_table *opp_table);
 struct opp_table *_find_opp_table(struct device *dev);
 struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
@@ -262,9 +261,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *o
 int _opp_add_v1(struct opp_table *opp_table, struct device *dev, struct dev_pm_opp_data *data, bool dynamic);
 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
 struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
-void _put_opp_list_kref(struct opp_table *opp_table);
 void _required_opps_available(struct dev_pm_opp *opp, int count);
-void _update_set_required_opps(struct opp_table *opp_table);
 
 static inline bool lazy_linking_pending(struct opp_table *opp_table)
 {
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index e3b97cd1fbbf..5f0fb3ea385b 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -393,17 +393,19 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
 	}
 
 	ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
-	if (ret < 0)
+	if (ret < 0) {
 		_free_optimized_voltages(dev, &opp_data);
+		return ret;
+	}
 
-	return ret;
+	return 0;
 }
 
 static struct platform_driver ti_opp_supply_driver = {
 	.probe = ti_opp_supply_probe,
 	.driver = {
 		   .name = "ti_opp_supply",
-		   .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+		   .of_match_table = ti_opp_supply_of_match,
 		   },
 };
 module_platform_driver(ti_opp_supply_driver);
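
The recurring transformation in the of.c hunks above is the move from manual of_node_put() / mutex_unlock() error paths to the scope-based cleanup helpers from <linux/cleanup.h>: guard(), scoped_guard() and __free(device_node), the latter defined in <linux/of.h>. A minimal sketch of the resulting pattern, under the assumption of a hypothetical show_first_opp_name() helper and my_table_lock that are not part of this patch:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only; show_first_opp_name() and my_table_lock are made up. */
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/of.h>		/* provides DEFINE_FREE(device_node, ...) */

static DEFINE_MUTEX(my_table_lock);

static int show_first_opp_name(struct device *dev)
{
	/* Both references are dropped via of_node_put() when they go out of scope */
	struct device_node *np __free(device_node) = NULL;
	struct device_node *opp_np __free(device_node) =
		of_parse_phandle(dev->of_node, "operating-points-v2", 0);

	if (!opp_np)
		return -ENODEV;

	/* Equivalent to mutex_lock() here plus mutex_unlock() on every return path */
	guard(mutex)(&my_table_lock);

	np = of_get_next_available_child(opp_np, NULL);
	if (!np)
		return -EINVAL;

	dev_info(dev, "first OPP node: %pOFn\n", np);
	return 0;
}

Because every early return now releases the lock and both node references automatically, the put_*/goto labels removed throughout dev_pm_opp_of_get_sharing_cpus() and of_get_required_opp_performance_state() are no longer needed.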
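dev_pm_opp_calc_power(), exported above, keeps the dynamic-power model described in its kerneldoc, P = C * V^2 * f. With made-up numbers (not taken from the patch): a dynamic-power-coefficient of 100 uW/MHz/V^2 at 1200 MHz and 900 mV gives about 100 * 0.9^2 * 1200 = 97200 uW, roughly 97 mW. A rough sketch of how a non-CPU driver might feed the exported callback to the Energy Model, mirroring the EM_SET_ACTIVE_POWER_CB() call in dev_pm_opp_of_register_em(); the my_register_em() wrapper and its error handling are placeholders:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only; assumes CONFIG_ENERGY_MODEL=y and a populated OPP table. */
#include <linux/device.h>
#include <linux/energy_model.h>
#include <linux/pm_opp.h>

static int my_register_em(struct device *dev)
{
	struct em_data_callback em_cb;
	int nr_opp = dev_pm_opp_get_opp_count(dev);

	if (nr_opp <= 0)
		return nr_opp ? nr_opp : -ENODEV;

	/* dev_pm_opp_calc_power() reads "dynamic-power-coefficient" from the DT node */
	EM_SET_ACTIVE_POWER_CB(em_cb, dev_pm_opp_calc_power);

	/* cpus == NULL: a devfreq-style perf domain; power values are in uW */
	return em_dev_register_perf_domain(dev, nr_opp, &em_cb, NULL, true);
}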