Diffstat (limited to 'drivers/opp')
 drivers/opp/Kconfig         |    1
 drivers/opp/core.c          | 1241
 drivers/opp/cpu.c           |   42
 drivers/opp/debugfs.c       |   25
 drivers/opp/of.c            |  430
 drivers/opp/opp.h           |   37
 drivers/opp/ti-opp-supply.c |   21
 7 files changed, 924 insertions, 873 deletions
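
Illustrative sketch, not part of the patch: much of the core.c diff below converts open-coded mutex_lock()/mutex_unlock() pairs and dev_pm_opp_put_opp_table() unwind labels to the scope-based helpers from <linux/cleanup.h> (guard(), scoped_guard(), __free(put_opp_table), __free(put_opp)). Modelled on the converted _get_opp_count()/dev_pm_opp_get_opp_count() hunks, the resulting pattern looks roughly like this:

static int example_opp_count(struct device *dev)
{
	/* The table reference is dropped automatically when opp_table goes out of scope. */
	struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev);
	struct dev_pm_opp *opp;
	int count = 0;

	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* The lock is released automatically on every return path below. */
	guard(mutex)(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node)
		if (opp->available)
			count++;

	return count;
}
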
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig index e8ce47b32735..d7c649a1a981 100644 --- a/drivers/opp/Kconfig +++ b/drivers/opp/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config PM_OPP bool - select SRCU help SOCs have a standard set of tuples consisting of frequency and voltage pairs that the device will support per voltage domain. This diff --git a/drivers/opp/core.c b/drivers/opp/core.c index e87567dbe99f..dbebb8c829bc 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -29,9 +29,6 @@ */ LIST_HEAD(opp_tables); -/* OPP tables with uninitialized required OPPs */ -LIST_HEAD(lazy_opp_tables); - /* Lock to allow exclusive modification to the device and opp lists */ DEFINE_MUTEX(opp_table_lock); /* Flag indicating that opp_tables list is being updated at the moment */ @@ -43,17 +40,14 @@ static DEFINE_XARRAY_ALLOC1(opp_configs); static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table) { struct opp_device *opp_dev; - bool found = false; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); + list_for_each_entry(opp_dev, &opp_table->dev_list, node) - if (opp_dev->dev == dev) { - found = true; - break; - } + if (opp_dev->dev == dev) + return true; - mutex_unlock(&opp_table->lock); - return found; + return false; } static struct opp_table *_find_opp_table_unlocked(struct device *dev) @@ -61,10 +55,8 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev) struct opp_table *opp_table; list_for_each_entry(opp_table, &opp_tables, node) { - if (_find_opp_dev(dev, opp_table)) { - _get_opp_table_kref(opp_table); - return opp_table; - } + if (_find_opp_dev(dev, opp_table)) + return dev_pm_opp_get_opp_table_ref(opp_table); } return ERR_PTR(-ENODEV); @@ -83,18 +75,13 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev) */ struct opp_table *_find_opp_table(struct device *dev) { - struct opp_table *opp_table; - if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } - mutex_lock(&opp_table_lock); - opp_table = _find_opp_table_unlocked(dev); - mutex_unlock(&opp_table_lock); - - return opp_table; + guard(mutex)(&opp_table_lock); + return _find_opp_table_unlocked(dev); } /* @@ -104,11 +91,55 @@ struct opp_table *_find_opp_table(struct device *dev) * representation in the OPP table and manage the clock configuration themselves * in an platform specific way. */ -static bool assert_single_clk(struct opp_table *opp_table) +static bool assert_single_clk(struct opp_table *opp_table, + unsigned int __always_unused index) { return !WARN_ON(opp_table->clk_count > 1); } +/* + * Returns true if clock table is large enough to contain the clock index. + */ +static bool assert_clk_index(struct opp_table *opp_table, + unsigned int index) +{ + return opp_table->clk_count > index; +} + +/* + * Returns true if bandwidth table is large enough to contain the bandwidth index. 
+ */ +static bool assert_bandwidth_index(struct opp_table *opp_table, + unsigned int index) +{ + return opp_table->path_count > index; +} + +/** + * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp + * @opp: opp for which bandwidth has to be returned for + * @peak: select peak or average bandwidth + * @index: bandwidth index + * + * Return: bandwidth in kBps, else return 0 + */ +unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index) +{ + if (IS_ERR_OR_NULL(opp)) { + pr_err("%s: Invalid parameters\n", __func__); + return 0; + } + + if (index >= opp->opp_table->path_count) + return 0; + + if (!opp->bandwidth) + return 0; + + return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw); + /** * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp * @opp: opp for which voltage has to be returned for @@ -180,32 +211,31 @@ unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) EXPORT_SYMBOL_GPL(dev_pm_opp_get_power); /** - * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp - * @opp: opp for which frequency has to be returned for + * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an + * available opp with specified index + * @opp: opp for which frequency has to be returned for + * @index: index of the frequency within the required opp * - * Return: frequency in hertz corresponding to the opp, else - * return 0 + * Return: frequency in hertz corresponding to the opp with specified index, + * else return 0 */ -unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index) { - if (IS_ERR_OR_NULL(opp)) { + if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) { pr_err("%s: Invalid parameters\n", __func__); return 0; } - if (!assert_single_clk(opp->opp_table)) - return 0; - - return opp->rates[0]; + return opp->rates[index]; } -EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed); /** * dev_pm_opp_get_level() - Gets the level corresponding to an available opp * @opp: opp for which level value has to be returned for * * Return: level read from device tree corresponding to the opp, else - * return 0. + * return U32_MAX. */ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) { @@ -225,7 +255,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); * @index: index of the required opp * * Return: performance state read from device tree corresponding to the - * required opp, else return 0. + * required opp, else return U32_MAX. 
*/ unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, unsigned int index) @@ -240,7 +270,13 @@ unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, if (lazy_linking_pending(opp->opp_table)) return 0; - return opp->required_opps[index]->pstate; + /* The required OPP table must belong to a genpd */ + if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) { + pr_err("%s: Performance state is only valid for genpds.\n", __func__); + return 0; + } + + return opp->required_opps[index]->level; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate); @@ -273,18 +309,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); */ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) { - struct opp_table *opp_table; - unsigned long clock_latency_ns; + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); - opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return 0; - clock_latency_ns = opp_table->clock_latency_ns_max; - - dev_pm_opp_put_opp_table(opp_table); - - return clock_latency_ns; + return opp_table->clock_latency_ns_max; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); @@ -296,7 +327,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); */ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) { - struct opp_table *opp_table; struct dev_pm_opp *opp; struct regulator *reg; unsigned long latency_ns = 0; @@ -306,39 +336,39 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) unsigned long max; } *uV; - opp_table = _find_opp_table(dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + if (IS_ERR(opp_table)) return 0; /* Regulator may not be required for the device */ if (!opp_table->regulators) - goto put_opp_table; + return 0; count = opp_table->regulator_count; uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) - goto put_opp_table; - - mutex_lock(&opp_table->lock); + return 0; - for (i = 0; i < count; i++) { - uV[i].min = ~0; - uV[i].max = 0; + scoped_guard(mutex, &opp_table->lock) { + for (i = 0; i < count; i++) { + uV[i].min = ~0; + uV[i].max = 0; - list_for_each_entry(opp, &opp_table->opp_list, node) { - if (!opp->available) - continue; + list_for_each_entry(opp, &opp_table->opp_list, node) { + if (!opp->available) + continue; - if (opp->supplies[i].u_volt_min < uV[i].min) - uV[i].min = opp->supplies[i].u_volt_min; - if (opp->supplies[i].u_volt_max > uV[i].max) - uV[i].max = opp->supplies[i].u_volt_max; + if (opp->supplies[i].u_volt_min < uV[i].min) + uV[i].min = opp->supplies[i].u_volt_min; + if (opp->supplies[i].u_volt_max > uV[i].max) + uV[i].max = opp->supplies[i].u_volt_max; + } } } - mutex_unlock(&opp_table->lock); - /* * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine. 
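
Illustrative aside between hunks, not part of the patch: the hunks above add the per-OPP getters dev_pm_opp_get_bw() and dev_pm_opp_get_freq_indexed(). A minimal consumer-side sketch, assuming a hypothetical device whose OPP table carries interconnect bandwidth values and more than one clock (the path and clock indices below are arbitrary):

#include <linux/pm_opp.h>

static int example_query_opp(struct device *dev)
{
	unsigned long freq = 1000 * 1000 * 1000;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Peak bandwidth (kBps) of interconnect path 0; returns 0 if not populated. */
	dev_info(dev, "peak bw: %lu kBps\n", dev_pm_opp_get_bw(opp, true, 0));

	/* Rate of the second clock of a multi-clock OPP table. */
	dev_info(dev, "clk1 rate: %lu Hz\n", dev_pm_opp_get_freq_indexed(opp, 1));

	dev_pm_opp_put(opp);
	return 0;
}
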
@@ -351,8 +381,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) } kfree(uV); -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); return latency_ns; } @@ -382,18 +410,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); */ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) { - struct opp_table *opp_table; unsigned long freq = 0; - opp_table = _find_opp_table(dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + if (IS_ERR(opp_table)) return 0; if (opp_table->suspend_opp && opp_table->suspend_opp->available) freq = dev_pm_opp_get_freq(opp_table->suspend_opp); - dev_pm_opp_put_opp_table(opp_table); - return freq; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); @@ -403,15 +430,13 @@ int _get_opp_count(struct opp_table *opp_table) struct dev_pm_opp *opp; int count = 0; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp, &opp_table->opp_list, node) { if (opp->available) count++; } - mutex_unlock(&opp_table->lock); - return count; } @@ -424,28 +449,23 @@ int _get_opp_count(struct opp_table *opp_table) */ int dev_pm_opp_get_opp_count(struct device *dev) { - struct opp_table *opp_table; - int count; + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); - opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - count = PTR_ERR(opp_table); - dev_dbg(dev, "%s: OPP table not found (%d)\n", - __func__, count); - return count; + dev_dbg(dev, "%s: OPP table not found (%ld)\n", + __func__, PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - count = _get_opp_count(opp_table); - dev_pm_opp_put_opp_table(opp_table); - - return count; + return _get_opp_count(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); /* Helpers to read keys */ static unsigned long _read_freq(struct dev_pm_opp *opp, int index) { - return opp->rates[0]; + return opp->rates[index]; } static unsigned long _read_level(struct dev_pm_opp *opp, int index) @@ -458,6 +478,16 @@ static unsigned long _read_bw(struct dev_pm_opp *opp, int index) return opp->bandwidth[index].peak; } +static unsigned long _read_opp_key(struct dev_pm_opp *opp, int index, + struct dev_pm_opp_key *key) +{ + key->bw = opp->bandwidth ? 
opp->bandwidth[index].peak : 0; + key->freq = opp->rates[index]; + key->level = opp->level; + + return true; +} + /* Generic comparison helpers */ static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key) @@ -491,21 +521,37 @@ static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, return false; } +static bool _compare_opp_key_exact(struct dev_pm_opp **opp, + struct dev_pm_opp *temp_opp, struct dev_pm_opp_key *opp_key, + struct dev_pm_opp_key *key) +{ + bool level_match = (key->level == OPP_LEVEL_UNSET || opp_key->level == key->level); + bool freq_match = (key->freq == 0 || opp_key->freq == key->freq); + bool bw_match = (key->bw == 0 || opp_key->bw == key->bw); + + if (freq_match && level_match && bw_match) { + *opp = temp_opp; + return true; + } + + return false; +} + /* Generic key finding helpers */ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); /* Assert that the requirement is met */ - if (assert && !assert(opp_table)) + if (assert && !assert(opp_table, index)) return ERR_PTR(-EINVAL); - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(temp_opp, &opp_table->opp_list, node) { if (temp_opp->available == available) { @@ -520,7 +566,36 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, dev_pm_opp_get(opp); } - mutex_unlock(&opp_table->lock); + return opp; +} + +static struct dev_pm_opp *_opp_table_find_opp_key(struct opp_table *opp_table, + struct dev_pm_opp_key *key, bool available, + unsigned long (*read)(struct dev_pm_opp *opp, int index, + struct dev_pm_opp_key *key), + bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, + struct dev_pm_opp_key *opp_key, struct dev_pm_opp_key *key), + bool (*assert)(struct opp_table *opp_table, unsigned int index)) +{ + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + struct dev_pm_opp_key temp_key; + + /* Assert that the requirement is met */ + if (!assert(opp_table, 0)) + return ERR_PTR(-EINVAL); + + guard(mutex)(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->available == available) { + read(temp_opp, 0, &temp_key); + if (compare(&opp, temp_opp, &temp_key, key)) { + /* Increment the reference count of OPP */ + dev_pm_opp_get(opp); + break; + } + } + } return opp; } @@ -530,30 +605,25 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { - struct opp_table *opp_table; - struct dev_pm_opp *opp; + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); - opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { dev_err(dev, "%s: OPP table not found (%ld)\n", __func__, PTR_ERR(opp_table)); return ERR_CAST(opp_table); } - opp = _opp_table_find_key(opp_table, key, index, available, read, - 
compare, assert); - - dev_pm_opp_put_opp_table(opp_table); - - return opp; + return _opp_table_find_key(opp_table, key, index, available, read, + compare, assert); } static struct dev_pm_opp *_find_key_exact(struct device *dev, unsigned long key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { /* * The value of key will be updated here, but will be ignored as the @@ -566,7 +636,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev, static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _opp_table_find_key(opp_table, key, index, available, read, _compare_ceil, assert); @@ -575,7 +645,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table, static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _find_key(dev, key, index, available, read, _compare_ceil, assert); @@ -584,7 +654,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key, static struct dev_pm_opp *_find_key_floor(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _find_key(dev, key, index, available, read, _compare_floor, assert); @@ -621,6 +691,77 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); +/** + * dev_pm_opp_find_key_exact() - Search for an OPP with exact key set + * @dev: Device for which the OPP is being searched + * @key: OPP key set to match + * @available: true/false - match for available OPP + * + * Search for an exact match of the key set in the OPP table. + * + * Return: A matching opp on success, else ERR_PTR in case of error. + * Possible error values: + * EINVAL: for bad pointers + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * Note: 'available' is a modifier for the search. If 'available' == true, + * then the match is for exact matching key and is available in the stored + * OPP table. If false, the match is for exact key which is not available. + * + * This provides a mechanism to enable an OPP which is not available currently + * or the opposite as well. + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. 
+ */ +struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev, + struct dev_pm_opp_key *key, + bool available) +{ + struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev); + + if (IS_ERR(opp_table)) { + dev_err(dev, "%s: OPP table not found (%ld)\n", __func__, + PTR_ERR(opp_table)); + return ERR_CAST(opp_table); + } + + return _opp_table_find_opp_key(opp_table, key, available, + _read_opp_key, _compare_opp_key_exact, + assert_single_clk); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_key_exact); + +/** + * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the + * clock corresponding to the index + * @dev: Device for which we do this operation + * @freq: frequency to search for + * @index: Clock index + * @available: true/false - match for available opp + * + * Search for the matching exact OPP for the clock corresponding to the + * specified index from a starting freq for a device. + * + * Return: matching *opp , else returns ERR_PTR in case of error and should be + * handled using IS_ERR. Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp * +dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, + u32 index, bool available) +{ + return _find_key_exact(dev, freq, index, available, _read_freq, + assert_clk_index); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed); + static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, unsigned long *freq) { @@ -654,6 +795,35 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); /** + * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the + * clock corresponding to the index + * @dev: Device for which we do this operation + * @freq: Start frequency + * @index: Clock index + * + * Search for the matching ceil *available* OPP for the clock corresponding to + * the specified index from a starting freq for a device. + * + * Return: matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp * +dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, + u32 index) +{ + return _find_key_ceil(dev, freq, index, true, _read_freq, + assert_clk_index); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed); + +/** * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq * @dev: device for which we do this operation * @freq: Start frequency @@ -679,6 +849,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); /** + * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the + * clock corresponding to the index + * @dev: Device for which we do this operation + * @freq: Start frequency + * @index: Clock index + * + * Search for the matching floor *available* OPP for the clock corresponding to + * the specified index from a starting freq for a device. 
+ * + * Return: matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp * +dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, + u32 index) +{ + return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed); + +/** * dev_pm_opp_find_level_exact() - search for an exact level * @dev: device for which we do this operation * @level: level to search for @@ -722,12 +920,52 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, struct dev_pm_opp *opp; opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL); + if (IS_ERR(opp)) + return opp; + + /* False match */ + if (temp == OPP_LEVEL_UNSET) { + dev_err(dev, "%s: OPP levels aren't available\n", __func__); + dev_pm_opp_put(opp); + return ERR_PTR(-ENODEV); + } + *level = temp; return opp; } EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); /** + * dev_pm_opp_find_level_floor() - Search for a rounded floor level + * @dev: device for which we do this operation + * @level: Start level + * + * Search for the matching floor *available* OPP from a starting level + * for a device. + * + * Return: matching *opp and refreshes *level accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, + unsigned int *level) +{ + unsigned long temp = *level; + struct dev_pm_opp *opp; + + opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL); + *level = temp; + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor); + +/** * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth * @dev: device for which we do this operation * @bw: start bandwidth @@ -752,7 +990,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, unsigned long temp = *bw; struct dev_pm_opp *opp; - opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL); + opp = _find_key_ceil(dev, &temp, index, true, _read_bw, + assert_bandwidth_index); *bw = temp; return opp; } @@ -783,7 +1022,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, unsigned long temp = *bw; struct dev_pm_opp *opp; - opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL); + opp = _find_key_floor(dev, &temp, index, true, _read_bw, + assert_bandwidth_index); *bw = temp; return opp; } @@ -837,7 +1077,7 @@ _opp_config_clk_single(struct device *dev, struct opp_table *opp_table, dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, ret); } else { - opp_table->rate_clk_single = freq; + opp_table->current_rate_single_clk = freq; } return ret; @@ -935,80 +1175,66 @@ static int _set_opp_bw(const struct opp_table *opp_table, return 0; } -static int _set_required_opp(struct device *dev, struct device *pd_dev, - struct dev_pm_opp *opp, int i) +static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp) { - unsigned int pstate = likely(opp) ? 
opp->required_opps[i]->pstate : 0; - int ret; + unsigned int level = 0; + int ret = 0; - if (!pd_dev) - return 0; + if (opp) { + if (opp->level == OPP_LEVEL_UNSET) + return 0; - ret = dev_pm_genpd_set_performance_state(pd_dev, pstate); - if (ret) { - dev_err(dev, "Failed to set performance state of %s: %d (%d)\n", - dev_name(pd_dev), pstate, ret); + level = opp->level; } + /* Request a new performance state through the device's PM domain. */ + ret = dev_pm_domain_set_performance_state(dev, level); + if (ret) + dev_err(dev, "Failed to set performance state %u (%d)\n", level, + ret); + return ret; } /* This is only called for PM domain for now */ -static int _set_required_opps(struct device *dev, - struct opp_table *opp_table, +static int _set_required_opps(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp, bool up) { - struct opp_table **required_opp_tables = opp_table->required_opp_tables; - struct device **genpd_virt_devs = opp_table->genpd_virt_devs; - int i, ret = 0; + struct device **devs = opp_table->required_devs; + struct dev_pm_opp *required_opp; + int index, target, delta, ret; - if (!required_opp_tables) + if (!devs) return 0; /* required-opps not fully initialized yet */ if (lazy_linking_pending(opp_table)) return -EBUSY; - /* - * We only support genpd's OPPs in the "required-opps" for now, as we - * don't know much about other use cases. Error out if the required OPP - * doesn't belong to a genpd. - */ - if (unlikely(!required_opp_tables[0]->is_genpd)) { - dev_err(dev, "required-opps don't belong to a genpd\n"); - return -ENOENT; - } - - /* Single genpd case */ - if (!genpd_virt_devs) - return _set_required_opp(dev, dev, opp, 0); - - /* Multiple genpd case */ - - /* - * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev - * after it is freed from another thread. - */ - mutex_lock(&opp_table->genpd_virt_dev_lock); - /* Scaling up? Set required OPPs in normal order, else reverse */ if (up) { - for (i = 0; i < opp_table->required_opp_count; i++) { - ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i); - if (ret) - break; - } + index = 0; + target = opp_table->required_opp_count; + delta = 1; } else { - for (i = opp_table->required_opp_count - 1; i >= 0; i--) { - ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i); + index = opp_table->required_opp_count - 1; + target = -1; + delta = -1; + } + + while (index != target) { + if (devs[index]) { + required_opp = opp ? opp->required_opps[index] : NULL; + + ret = _set_opp_level(devs[index], required_opp); if (ret) - break; + return ret; } - } - mutex_unlock(&opp_table->genpd_virt_dev_lock); + index += delta; + } - return ret; + return 0; } static void _find_current_opp(struct device *dev, struct opp_table *opp_table) @@ -1027,10 +1253,9 @@ static void _find_current_opp(struct device *dev, struct opp_table *opp_table) * make special checks to validate current_opp. 
*/ if (IS_ERR(opp)) { - mutex_lock(&opp_table->lock); - opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node); - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + guard(mutex)(&opp_table->lock); + opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list, + struct dev_pm_opp, node)); } opp_table->current_opp = opp; @@ -1058,8 +1283,13 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) if (opp_table->regulators) regulator_disable(opp_table->regulators[0]); + ret = _set_opp_level(dev, NULL); + if (ret) + goto out; + ret = _set_required_opps(dev, opp_table, NULL, false); +out: opp_table->enabled = false; return ret; } @@ -1081,7 +1311,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, /* Return early if nothing to do */ if (!forced && old_opp == opp && opp_table->enabled) { - dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__); + dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__); return 0; } @@ -1102,6 +1332,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } + ret = _set_opp_level(dev, opp); + if (ret) + return ret; + ret = _set_opp_bw(opp_table, opp, dev); if (ret) { dev_err(dev, "Failed to set bw: %d\n", ret); @@ -1145,6 +1379,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } + ret = _set_opp_level(dev, opp); + if (ret) + return ret; + ret = _set_required_opps(dev, opp_table, opp, false); if (ret) { dev_err(dev, "Failed to set required opps: %d\n", ret); @@ -1156,8 +1394,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, dev_pm_opp_put(old_opp); /* Make sure current_opp doesn't get freed */ - dev_pm_opp_get(opp); - opp_table->current_opp = opp; + opp_table->current_opp = dev_pm_opp_get(opp); return ret; } @@ -1175,13 +1412,13 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, */ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - struct opp_table *opp_table; + struct dev_pm_opp *opp __free(put_opp) = NULL; unsigned long freq = 0, temp_freq; - struct dev_pm_opp *opp = NULL; bool forced = false; - int ret; - opp_table = _find_opp_table(dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + if (IS_ERR(opp_table)) { dev_err(dev, "%s: device's opp table doesn't exist\n", __func__); return PTR_ERR(opp_table); @@ -1196,9 +1433,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * equivalent to a clk_set_rate() */ if (!_get_opp_count(opp_table)) { - ret = opp_table->config_clks(dev, opp_table, NULL, - &target_freq, false); - goto put_opp_table; + return opp_table->config_clks(dev, opp_table, NULL, + &target_freq, false); } freq = clk_round_rate(opp_table->clk, target_freq); @@ -1213,10 +1449,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) temp_freq = freq; opp = _find_freq_ceil(opp_table, &temp_freq); if (IS_ERR(opp)) { - ret = PTR_ERR(opp); - dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", - __func__, freq, ret); - goto put_opp_table; + dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n", + __func__, freq, PTR_ERR(opp)); + return PTR_ERR(opp); } /* @@ -1226,17 +1461,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * value of the frequency. In such a case, do not abort but * configure the hardware to the desired frequency forcefully. 
*/ - forced = opp_table->rate_clk_single != target_freq; + forced = opp_table->current_rate_single_clk != freq; } - ret = _set_opp(dev, opp_table, opp, &target_freq, forced); - - if (target_freq) - dev_pm_opp_put(opp); - -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - return ret; + return _set_opp(dev, opp_table, opp, &freq, forced); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); @@ -1252,19 +1480,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); */ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); - opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { dev_err(dev, "%s: device opp doesn't exist\n", __func__); return PTR_ERR(opp_table); } - ret = _set_opp(dev, opp_table, opp, NULL, false); - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return _set_opp(dev, opp_table, opp, NULL, false); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); @@ -1289,9 +1513,8 @@ struct opp_device *_add_opp_dev(const struct device *dev, /* Initialize opp-dev */ opp_dev->dev = dev; - mutex_lock(&opp_table->lock); - list_add(&opp_dev->node, &opp_table->dev_list); - mutex_unlock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) + list_add(&opp_dev->node, &opp_table->dev_list); /* Create debugfs entries for the opp_table */ opp_debug_register(opp_dev, opp_table); @@ -1314,7 +1537,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) return ERR_PTR(-ENOMEM); mutex_init(&opp_table->lock); - mutex_init(&opp_table->genpd_virt_dev_lock); INIT_LIST_HEAD(&opp_table->dev_list); INIT_LIST_HEAD(&opp_table->lazy); @@ -1348,17 +1570,14 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) return opp_table; remove_opp_dev: + _of_clear_opp_table(opp_table); _remove_opp_dev(opp_dev, opp_table); + mutex_destroy(&opp_table->lock); err: kfree(opp_table); return ERR_PTR(ret); } -void _get_opp_table_kref(struct opp_table *opp_table) -{ - kref_get(&opp_table->kref); -} - static struct opp_table *_update_opp_table_clk(struct device *dev, struct opp_table *opp_table, bool getclk) @@ -1512,22 +1731,20 @@ static void _opp_table_kref_release(struct kref *kref) WARN_ON(!list_empty(&opp_table->opp_list)); - list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) { - /* - * The OPP table is getting removed, drop the performance state - * constraints. 
- */ - if (opp_table->genpd_performance_state) - dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0); - + list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) _remove_opp_dev(opp_dev, opp_table); - } - mutex_destroy(&opp_table->genpd_virt_dev_lock); mutex_destroy(&opp_table->lock); kfree(opp_table); } +struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) +{ + kref_get(&opp_table->kref); + return opp_table; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref); + void dev_pm_opp_put_opp_table(struct opp_table *opp_table) { kref_put_mutex(&opp_table->kref, _opp_table_kref_release, @@ -1558,10 +1775,12 @@ static void _opp_kref_release(struct kref *kref) kfree(opp); } -void dev_pm_opp_get(struct dev_pm_opp *opp) +struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp) { kref_get(&opp->kref); + return opp; } +EXPORT_SYMBOL_GPL(dev_pm_opp_get); void dev_pm_opp_put(struct dev_pm_opp *opp) { @@ -1579,26 +1798,25 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put); void dev_pm_opp_remove(struct device *dev, unsigned long freq) { struct dev_pm_opp *opp = NULL, *iter; - struct opp_table *opp_table; - opp_table = _find_opp_table(dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + if (IS_ERR(opp_table)) return; - if (!assert_single_clk(opp_table)) - goto put_table; - - mutex_lock(&opp_table->lock); + if (!assert_single_clk(opp_table, 0)) + return; - list_for_each_entry(iter, &opp_table->opp_list, node) { - if (iter->rates[0] == freq) { - opp = iter; - break; + scoped_guard(mutex, &opp_table->lock) { + list_for_each_entry(iter, &opp_table->opp_list, node) { + if (iter->rates[0] == freq) { + opp = iter; + break; + } } } - mutex_unlock(&opp_table->lock); - if (opp) { dev_pm_opp_put(opp); @@ -1608,32 +1826,26 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", __func__, freq); } - -put_table: - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, bool dynamic) { - struct dev_pm_opp *opp = NULL, *temp; + struct dev_pm_opp *opp; + + guard(mutex)(&opp_table->lock); - mutex_lock(&opp_table->lock); - list_for_each_entry(temp, &opp_table->opp_list, node) { + list_for_each_entry(opp, &opp_table->opp_list, node) { /* * Refcount must be dropped only once for each OPP by OPP core, * do that with help of "removed" flag. 
*/ - if (!temp->removed && dynamic == temp->dynamic) { - opp = temp; - break; - } + if (!opp->removed && dynamic == opp->dynamic) + return opp; } - mutex_unlock(&opp_table->lock); - return opp; + return NULL; } /* @@ -1657,20 +1869,14 @@ static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) bool _opp_remove_all_static(struct opp_table *opp_table) { - mutex_lock(&opp_table->lock); - - if (!opp_table->parsed_static_opps) { - mutex_unlock(&opp_table->lock); - return false; - } + scoped_guard(mutex, &opp_table->lock) { + if (!opp_table->parsed_static_opps) + return false; - if (--opp_table->parsed_static_opps) { - mutex_unlock(&opp_table->lock); - return true; + if (--opp_table->parsed_static_opps) + return true; } - mutex_unlock(&opp_table->lock); - _opp_remove_all(opp_table, false); return true; } @@ -1683,16 +1889,13 @@ bool _opp_remove_all_static(struct opp_table *opp_table) */ void dev_pm_opp_remove_all_dynamic(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); - opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; _opp_remove_all(opp_table, true); - - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); @@ -1723,6 +1926,8 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table) INIT_LIST_HEAD(&opp->node); + opp->level = OPP_LEVEL_UNSET; + return opp; } @@ -1875,17 +2080,15 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct list_head *head; int ret; - mutex_lock(&opp_table->lock); - head = &opp_table->opp_list; + scoped_guard(mutex, &opp_table->lock) { + head = &opp_table->opp_list; - ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); - if (ret) { - mutex_unlock(&opp_table->lock); - return ret; - } + ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); + if (ret) + return ret; - list_add(&new_opp->node, head); - mutex_unlock(&opp_table->lock); + list_add(&new_opp->node, head); + } new_opp->opp_table = opp_table; kref_init(&new_opp->kref); @@ -1911,8 +2114,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, * _opp_add_v1() - Allocate a OPP based on v1 bindings. * @opp_table: OPP table * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP + * @data: The OPP data for the OPP to add * @dynamic: Dynamically added OPPs. * * This function adds an opp definition to the opp table and returns status. 
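
Another illustrative aside, not part of the patch: the earlier hunks add per-clock-index lookups (dev_pm_opp_find_freq_{exact,ceil,floor}_indexed()) and a key-based lookup, dev_pm_opp_find_key_exact(). A rough sketch of both, where the struct dev_pm_opp_key members (freq, level, bw) are inferred from the _read_opp_key()/_compare_opp_key_exact() hunks and zero / OPP_LEVEL_UNSET fields act as wildcards:

static int example_indexed_lookup(struct device *dev)
{
	unsigned long freq = 600 * 1000 * 1000;
	struct dev_pm_opp *opp;

	/* Ceil lookup against clock index 1 of a multi-clock OPP table. */
	opp = dev_pm_opp_find_freq_ceil_indexed(dev, &freq, 1);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);
	return 0;
}

static int example_key_lookup(struct device *dev)
{
	struct dev_pm_opp_key key = {
		.freq = 600 * 1000 * 1000,	/* match this rate ... */
		.level = OPP_LEVEL_UNSET,	/* ... at any level ... */
		.bw = 0,			/* ... and any bandwidth */
	};
	struct dev_pm_opp *opp;

	/* Only available OPPs are considered when the last argument is true. */
	opp = dev_pm_opp_find_key_exact(dev, &key, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);
	return 0;
}
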
@@ -1930,13 +2132,13 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, * -ENOMEM Memory allocation failure */ int _opp_add_v1(struct opp_table *opp_table, struct device *dev, - unsigned long freq, long u_volt, bool dynamic) + struct dev_pm_opp_data *data, bool dynamic) { struct dev_pm_opp *new_opp; - unsigned long tol; + unsigned long tol, u_volt = data->u_volt; int ret; - if (!assert_single_clk(opp_table)) + if (!assert_single_clk(opp_table, 0)) return -EINVAL; new_opp = _opp_allocate(opp_table); @@ -1944,7 +2146,9 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev, return -ENOMEM; /* populate the opp table */ - new_opp->rates[0] = freq; + new_opp->rates[0] = data->freq; + new_opp->level = data->level; + new_opp->turbo = data->turbo; tol = u_volt * opp_table->voltage_tolerance_v1 / 100; new_opp->supplies[0].u_volt = u_volt; new_opp->supplies[0].u_volt_min = u_volt - tol; @@ -1973,12 +2177,7 @@ free_opp: return ret; } -/** - * _opp_set_supported_hw() - Set supported platforms - * @dev: Device for which supported-hw has to be set. - * @versions: Array of hierarchy of versions to match. - * @count: Number of elements in the array. - * +/* * This is required only for the V2 bindings, and it enables a platform to * specify the hierarchy of versions it supports. OPP layer will then enable * OPPs, which are available for those versions, based on its 'opp-supported-hw' @@ -1991,8 +2190,8 @@ static int _opp_set_supported_hw(struct opp_table *opp_table, if (opp_table->supported_hw) return 0; - opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), - GFP_KERNEL); + opp_table->supported_hw = kmemdup_array(versions, count, + sizeof(*versions), GFP_KERNEL); if (!opp_table->supported_hw) return -ENOMEM; @@ -2001,14 +2200,6 @@ static int _opp_set_supported_hw(struct opp_table *opp_table, return 0; } -/** - * _opp_put_supported_hw() - Releases resources blocked for supported hw - * @opp_table: OPP table returned by _opp_set_supported_hw(). - * - * This is required only for the V2 bindings, and is called for a matching - * _opp_set_supported_hw(). Until this is called, the opp_table structure - * will not be freed. - */ static void _opp_put_supported_hw(struct opp_table *opp_table) { if (opp_table->supported_hw) { @@ -2018,11 +2209,7 @@ static void _opp_put_supported_hw(struct opp_table *opp_table) } } -/** - * _opp_set_prop_name() - Set prop-extn name - * @dev: Device for which the prop-name has to be set. - * @name: name to postfix to properties. - * +/* * This is required only for the V2 bindings, and it enables a platform to * specify the extn to be used for certain property names. The properties to * which the extension will apply are opp-microvolt and opp-microamp. OPP core @@ -2040,14 +2227,6 @@ static int _opp_set_prop_name(struct opp_table *opp_table, const char *name) return 0; } -/** - * _opp_put_prop_name() - Releases resources blocked for prop-name - * @opp_table: OPP table returned by _opp_set_prop_name(). - * - * This is required only for the V2 bindings, and is called for a matching - * _opp_set_prop_name(). Until this is called, the opp_table structure - * will not be freed. - */ static void _opp_put_prop_name(struct opp_table *opp_table) { if (opp_table->prop_name) { @@ -2056,12 +2235,7 @@ static void _opp_put_prop_name(struct opp_table *opp_table) } } -/** - * _opp_set_regulators() - Set regulator names for the device - * @dev: Device for which regulator name is being set. - * @names: Array of pointers to the names of the regulator. 
- * @count: Number of regulators. - * +/* * In order to support OPP switching, OPP layer needs to know the name of the * device's regulators, as the core would be required to switch voltages as * well. @@ -2123,10 +2297,6 @@ free_regulators: return ret; } -/** - * _opp_put_regulators() - Releases resources blocked for regulator - * @opp_table: OPP table returned from _opp_set_regulators(). - */ static void _opp_put_regulators(struct opp_table *opp_table) { int i; @@ -2158,11 +2328,7 @@ static void _put_clks(struct opp_table *opp_table, int count) opp_table->clks = NULL; } -/** - * _opp_set_clknames() - Set clk names for the device - * @dev: Device for which clk names is being set. - * @names: Clk names. - * +/* * In order to support OPP switching, OPP layer needs to get pointers to the * clocks for the device. Simple cases work fine without using this routine * (i.e. by passing connection-id as NULL), but for a device with multiple @@ -2246,10 +2412,6 @@ free_clks: return ret; } -/** - * _opp_put_clknames() - Releases resources blocked for clks. - * @opp_table: OPP table returned from _opp_set_clknames(). - */ static void _opp_put_clknames(struct opp_table *opp_table) { if (!opp_table->clks) @@ -2261,11 +2423,7 @@ static void _opp_put_clknames(struct opp_table *opp_table) _put_clks(opp_table, opp_table->clk_count); } -/** - * _opp_set_config_regulators_helper() - Register custom set regulator helper. - * @dev: Device for which the helper is getting registered. - * @config_regulators: Custom set regulator helper. - * +/* * This is useful to support platforms with multiple regulators per device. * * This must be called before any OPPs are initialized for the device. @@ -2280,141 +2438,79 @@ static int _opp_set_config_regulators_helper(struct opp_table *opp_table, return 0; } -/** - * _opp_put_config_regulators_helper() - Releases resources blocked for - * config_regulators helper. - * @opp_table: OPP table returned from _opp_set_config_regulators_helper(). - * - * Release resources blocked for platform specific config_regulators helper. - */ static void _opp_put_config_regulators_helper(struct opp_table *opp_table) { if (opp_table->config_regulators) opp_table->config_regulators = NULL; } -static void _detach_genpd(struct opp_table *opp_table) +static int _opp_set_required_dev(struct opp_table *opp_table, + struct device *dev, + struct device *required_dev, + unsigned int index) { - int index; - - if (!opp_table->genpd_virt_devs) - return; + struct opp_table *required_table, *pd_table; + struct device *gdev; - for (index = 0; index < opp_table->required_opp_count; index++) { - if (!opp_table->genpd_virt_devs[index]) - continue; - - dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false); - opp_table->genpd_virt_devs[index] = NULL; + /* Genpd core takes care of propagation to parent genpd */ + if (opp_table->is_genpd) { + dev_err(dev, "%s: Operation not supported for genpds\n", __func__); + return -EOPNOTSUPP; } - kfree(opp_table->genpd_virt_devs); - opp_table->genpd_virt_devs = NULL; -} - -/** - * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer - * @dev: Consumer device for which the genpd is getting attached. - * @names: Null terminated array of pointers containing names of genpd to attach. - * @virt_devs: Pointer to return the array of virtual devices. - * - * Multiple generic power domains for a device are supported with the help of - * virtual genpd devices, which are created for each consumer device - genpd - * pair. 
These are the device structures which are attached to the power domain - * and are required by the OPP core to set the performance state of the genpd. - * The same API also works for the case where single genpd is available and so - * we don't need to support that separately. - * - * This helper will normally be called by the consumer driver of the device - * "dev", as only that has details of the genpd names. - * - * This helper needs to be called once with a list of all genpd to attach. - * Otherwise the original device structure will be used instead by the OPP core. - * - * The order of entries in the names array must match the order in which - * "required-opps" are added in DT. - */ -static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, - const char * const *names, struct device ***virt_devs) -{ - struct device *virt_dev; - int index = 0, ret = -EINVAL; - const char * const *name = names; + if (index >= opp_table->required_opp_count) { + dev_err(dev, "Required OPPs not available, can't set required devs\n"); + return -EINVAL; + } - if (opp_table->genpd_virt_devs) - return 0; + required_table = opp_table->required_opp_tables[index]; + if (IS_ERR(required_table)) { + dev_err(dev, "Missing OPP table, unable to set the required devs\n"); + return -ENODEV; + } /* - * If the genpd's OPP table isn't already initialized, parsing of the - * required-opps fail for dev. We should retry this after genpd's OPP - * table is added. + * The required_opp_tables parsing is not perfect, as the OPP core does + * the parsing solely based on the DT node pointers. The core sets the + * required_opp_tables entry to the first OPP table in the "opp_tables" + * list, that matches with the node pointer. + * + * If the target DT OPP table is used by multiple devices and they all + * create separate instances of 'struct opp_table' from it, then it is + * possible that the required_opp_tables entry may be set to the + * incorrect sibling device. + * + * Cross check it again and fix if required. */ - if (!opp_table->required_opp_count) - return -EPROBE_DEFER; - - mutex_lock(&opp_table->genpd_virt_dev_lock); - - opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count, - sizeof(*opp_table->genpd_virt_devs), - GFP_KERNEL); - if (!opp_table->genpd_virt_devs) - goto unlock; - - while (*name) { - if (index >= opp_table->required_opp_count) { - dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n", - *name, opp_table->required_opp_count, index); - goto err; - } - - virt_dev = dev_pm_domain_attach_by_name(dev, *name); - if (IS_ERR_OR_NULL(virt_dev)) { - ret = PTR_ERR(virt_dev) ? : -ENODEV; - dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret); - goto err; + gdev = dev_to_genpd_dev(required_dev); + if (IS_ERR(gdev)) + return PTR_ERR(gdev); + + pd_table = _find_opp_table(gdev); + if (!IS_ERR(pd_table)) { + if (pd_table != required_table) { + dev_pm_opp_put_opp_table(required_table); + opp_table->required_opp_tables[index] = pd_table; + } else { + dev_pm_opp_put_opp_table(pd_table); } - - opp_table->genpd_virt_devs[index] = virt_dev; - index++; - name++; } - if (virt_devs) - *virt_devs = opp_table->genpd_virt_devs; - mutex_unlock(&opp_table->genpd_virt_dev_lock); - + opp_table->required_devs[index] = required_dev; return 0; - -err: - _detach_genpd(opp_table); -unlock: - mutex_unlock(&opp_table->genpd_virt_dev_lock); - return ret; - } -/** - * _opp_detach_genpd() - Detach genpd(s) from the device. - * @opp_table: OPP table returned by _opp_attach_genpd(). 
- * - * This detaches the genpd(s), resets the virtual device pointers, and puts the - * OPP table. - */ -static void _opp_detach_genpd(struct opp_table *opp_table) +static void _opp_put_required_dev(struct opp_table *opp_table, + unsigned int index) { - /* - * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting - * used in parallel. - */ - mutex_lock(&opp_table->genpd_virt_dev_lock); - _detach_genpd(opp_table); - mutex_unlock(&opp_table->genpd_virt_dev_lock); + opp_table->required_devs[index] = NULL; } static void _opp_clear_config(struct opp_config_data *data) { - if (data->flags & OPP_CONFIG_GENPD) - _opp_detach_genpd(data->opp_table); + if (data->flags & OPP_CONFIG_REQUIRED_DEV) + _opp_put_required_dev(data->opp_table, + data->required_dev_index); if (data->flags & OPP_CONFIG_REGULATOR) _opp_put_regulators(data->opp_table); if (data->flags & OPP_CONFIG_SUPPORTED_HW) @@ -2526,14 +2622,15 @@ int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) data->flags |= OPP_CONFIG_REGULATOR; } - /* Attach genpds */ - if (config->genpd_names) { - ret = _opp_attach_genpd(opp_table, dev, config->genpd_names, - config->virt_devs); + if (config->required_dev) { + ret = _opp_set_required_dev(opp_table, dev, + config->required_dev, + config->required_dev_index); if (ret) goto err; - data->flags |= OPP_CONFIG_GENPD; + data->required_dev_index = config->required_dev_index; + data->flags |= OPP_CONFIG_REQUIRED_DEV; } ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX), @@ -2551,7 +2648,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_config); /** * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration. - * @opp_table: OPP table returned from dev_pm_opp_set_config(). + * @token: The token returned by dev_pm_opp_set_config() previously. * * This allows all device OPP configurations to be cleared at once. 
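
Illustrative aside, not part of the patch: the hunk above replaces the genpd_names/virt_devs attach path with a required_dev/required_dev_index pair in struct dev_pm_opp_config. A hedged sketch of how a consumer might wire this up, assuming the required device pointer comes from the platform (for example an already-attached power-domain device):

static int example_link_required_dev(struct device *dev,
				     struct device *required_dev)
{
	struct dev_pm_opp_config config = {
		.required_dev = required_dev,
		.required_dev_index = 0,
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;

	/* ... and on teardown, drop the configuration again. */
	dev_pm_opp_clear_config(token);
	return 0;
}
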
This must be * called once for each call made to dev_pm_opp_set_config(), in order to free @@ -2638,18 +2735,16 @@ struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, return ERR_PTR(-EBUSY); for (i = 0; i < src_table->required_opp_count; i++) { - if (src_table->required_opp_tables[i] == dst_table) { - mutex_lock(&src_table->lock); + if (src_table->required_opp_tables[i] != dst_table) + continue; + scoped_guard(mutex, &src_table->lock) { list_for_each_entry(opp, &src_table->opp_list, node) { if (opp == src_opp) { - dest_opp = opp->required_opps[i]; - dev_pm_opp_get(dest_opp); + dest_opp = dev_pm_opp_get(opp->required_opps[i]); break; } } - - mutex_unlock(&src_table->lock); break; } } @@ -2681,7 +2776,6 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, unsigned int pstate) { struct dev_pm_opp *opp; - int dest_pstate = -EINVAL; int i; /* @@ -2694,6 +2788,12 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, if (!src_table || !src_table->required_opp_count) return pstate; + /* Both OPP tables must belong to genpds */ + if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) { + pr_err("%s: Performance state is only valid for genpds.\n", __func__); + return -EINVAL; + } + /* required-opps not fully initialized yet */ if (lazy_linking_pending(src_table)) return -EBUSY; @@ -2709,29 +2809,23 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, return -EINVAL; } - mutex_lock(&src_table->lock); + guard(mutex)(&src_table->lock); list_for_each_entry(opp, &src_table->opp_list, node) { - if (opp->pstate == pstate) { - dest_pstate = opp->required_opps[i]->pstate; - goto unlock; - } + if (opp->level == pstate) + return opp->required_opps[i]->level; } pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table, dst_table); -unlock: - mutex_unlock(&src_table->lock); - - return dest_pstate; + return -EINVAL; } /** - * dev_pm_opp_add() - Add an OPP table from a table definitions - * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP + * dev_pm_opp_add_dynamic() - Add an OPP table from a table definitions + * @dev: The device for which we do this operation + * @data: The OPP data for the OPP to add * * This function adds an opp definition to the opp table and returns status. 
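
Illustrative aside, not part of the patch: dev_pm_opp_add() becomes dev_pm_opp_add_dynamic(), which takes a struct dev_pm_opp_data whose freq/u_volt/level/turbo members are visible in the _opp_add_v1() hunk earlier in this diff. A minimal sketch with made-up values:

static int example_add_dynamic_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.freq = 800 * 1000 * 1000,	/* Hz */
		.u_volt = 950000,		/* uV */
		.level = 3,			/* optional performance level */
	};

	return dev_pm_opp_add_dynamic(dev, &data);
}
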
* The opp is made available by default and it can be controlled using @@ -2744,7 +2838,7 @@ unlock: * Duplicate OPPs (both freq and volt are same) and !opp->available * -ENOMEM Memory allocation failure */ -int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data) { struct opp_table *opp_table; int ret; @@ -2756,13 +2850,13 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) /* Fix regulator count for dynamic OPPs */ opp_table->regulator_count = 1; - ret = _opp_add_v1(opp_table, dev, freq, u_volt, true); + ret = _opp_add_v1(opp_table, dev, data, true); if (ret) dev_pm_opp_put_opp_table(opp_table); return ret; } -EXPORT_SYMBOL_GPL(dev_pm_opp_add); +EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic); /** * _opp_set_availability() - helper to set the availability of an opp @@ -2780,46 +2874,39 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); static int _opp_set_availability(struct device *dev, unsigned long freq, bool availability_req) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; /* Find the opp_table */ - opp_table = _find_opp_table(dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + if (IS_ERR(opp_table)) { - r = PTR_ERR(opp_table); - dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); - return r; + dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__, + PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - if (!assert_single_clk(opp_table)) { - r = -EINVAL; - goto put_table; - } + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; - mutex_lock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) { + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { + if (tmp_opp->rates[0] == freq) { + opp = dev_pm_opp_get(tmp_opp); - /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { - if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; - break; - } - } + /* Is update really needed? */ + if (opp->available == availability_req) + return 0; - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto unlock; + opp->available = availability_req; + break; + } + } } - /* Is update really needed? 
*/ - if (opp->available == availability_req) - goto unlock; - - opp->available = availability_req; - - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); /* Notify the change of the OPP availability */ if (availability_req) @@ -2829,14 +2916,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE, opp); - dev_pm_opp_put(opp); - goto put_table; - -unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } /** @@ -2856,65 +2936,92 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, unsigned long u_volt_max) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; + int r; /* Find the opp_table */ - opp_table = _find_opp_table(dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + if (IS_ERR(opp_table)) { r = PTR_ERR(opp_table); dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); return r; } - if (!assert_single_clk(opp_table)) { - r = -EINVAL; - goto put_table; - } - - mutex_lock(&opp_table->lock); + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; - /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { - if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; - break; - } - } + scoped_guard(mutex, &opp_table->lock) { + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { + if (tmp_opp->rates[0] == freq) { + opp = dev_pm_opp_get(tmp_opp); - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto adjust_unlock; - } + /* Is update really needed? */ + if (opp->supplies->u_volt == u_volt) + return 0; - /* Is update really needed? */ - if (opp->supplies->u_volt == u_volt) - goto adjust_unlock; + opp->supplies->u_volt = u_volt; + opp->supplies->u_volt_min = u_volt_min; + opp->supplies->u_volt_max = u_volt_max; - opp->supplies->u_volt = u_volt; - opp->supplies->u_volt_min = u_volt_min; - opp->supplies->u_volt_max = u_volt_max; + break; + } + } + } - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); /* Notify the voltage change of the OPP */ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE, opp); - dev_pm_opp_put(opp); - goto put_table; - -adjust_unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); /** + * dev_pm_opp_sync_regulators() - Sync state of voltage regulators + * @dev: device for which we do this operation + * + * Sync voltage state of the OPP table regulators. + * + * Return: 0 on success or a negative error value. 
+ */ +int dev_pm_opp_sync_regulators(struct device *dev) +{ + struct regulator *reg; + int ret, i; + + /* Device may not have OPP table */ + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + + if (IS_ERR(opp_table)) + return 0; + + /* Regulator may not be required for the device */ + if (unlikely(!opp_table->regulators)) + return 0; + + /* Nothing to sync if voltage wasn't changed */ + if (!opp_table->enabled) + return 0; + + for (i = 0; i < opp_table->regulator_count; i++) { + reg = opp_table->regulators[i]; + ret = regulator_sync_voltage(reg); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); + +/** * dev_pm_opp_enable() - Enable a specific OPP * @dev: device for which we do this operation * @freq: OPP frequency to enable @@ -2962,18 +3069,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable); */ int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); - opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_register(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_register(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_register_notifier); @@ -2987,18 +3089,13 @@ EXPORT_SYMBOL(dev_pm_opp_register_notifier); int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); - opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_unregister(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_unregister(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); @@ -3011,10 +3108,10 @@ EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); */ void dev_pm_opp_remove_table(struct device *dev) { - struct opp_table *opp_table; - /* Check for existing table for 'dev' */ - opp_table = _find_opp_table(dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(dev); + if (IS_ERR(opp_table)) { int error = PTR_ERR(opp_table); @@ -3032,49 +3129,5 @@ void dev_pm_opp_remove_table(struct device *dev) **/ if (_opp_remove_all_static(opp_table)) dev_pm_opp_put_opp_table(opp_table); - - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); - -/** - * dev_pm_opp_sync_regulators() - Sync state of voltage regulators - * @dev: device for which we do this operation - * - * Sync voltage state of the OPP table regulators. - * - * Return: 0 on success or a negative error value. 
- */ -int dev_pm_opp_sync_regulators(struct device *dev) -{ - struct opp_table *opp_table; - struct regulator *reg; - int i, ret = 0; - - /* Device may not have OPP table */ - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) - return 0; - - /* Regulator may not be required for the device */ - if (unlikely(!opp_table->regulators)) - goto put_table; - - /* Nothing to sync if voltage wasn't changed */ - if (!opp_table->enabled) - goto put_table; - - for (i = 0; i < opp_table->regulator_count; i++) { - reg = opp_table->regulators[i]; - ret = regulator_sync_voltage(reg); - if (ret) - break; - } -put_table: - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); - - return ret; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c index 3c3506021501..a6da7ee3ec76 100644 --- a/drivers/opp/cpu.c +++ b/drivers/opp/cpu.c @@ -24,7 +24,7 @@ /** * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device * @dev: device for which we do this operation - * @table: Cpufreq table returned back to caller + * @opp_table: Cpufreq table returned back to caller * * Generate a cpufreq table for a provided device- this assumes that the * opp table is already initialized and ready for usage. @@ -43,7 +43,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **opp_table) { - struct dev_pm_opp *opp; struct cpufreq_frequency_table *freq_table = NULL; int i, max_opps, ret = 0; unsigned long rate; @@ -58,7 +57,9 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, for (i = 0, rate = 0; i < max_opps; i++, rate++) { /* find next rate */ - opp = dev_pm_opp_find_freq_ceil(dev, &rate); + struct dev_pm_opp *opp __free(put_opp) = + dev_pm_opp_find_freq_ceil(dev, &rate); + if (IS_ERR(opp)) { ret = PTR_ERR(opp); goto out; @@ -69,8 +70,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, /* Is Boost/turbo opp ? 
*/ if (dev_pm_opp_is_turbo(opp)) freq_table[i].flags = CPUFREQ_BOOST_FREQ; - - dev_pm_opp_put(opp); } freq_table[i].driver_data = i; @@ -89,7 +88,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); /** * dev_pm_opp_free_cpufreq_table() - free the cpufreq table * @dev: device for which we do this operation - * @table: table to free + * @opp_table: table to free * * Free up the table allocated by dev_pm_opp_init_cpufreq_table */ @@ -156,11 +155,12 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { struct opp_device *opp_dev; - struct opp_table *opp_table; struct device *dev; - int cpu, ret = 0; + int cpu; + + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(cpu_dev); - opp_table = _find_opp_table(cpu_dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); @@ -186,9 +186,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; } - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); @@ -205,32 +203,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { struct opp_device *opp_dev; - struct opp_table *opp_table; - int ret = 0; - opp_table = _find_opp_table(cpu_dev); + struct opp_table *opp_table __free(put_opp_table) = + _find_opp_table(cpu_dev); + if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { - ret = -EINVAL; - goto put_opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) + return -EINVAL; cpumask_clear(cpumask); if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp_dev, &opp_table->dev_list, node) cpumask_set_cpu(opp_dev->dev->id, cpumask); - mutex_unlock(&opp_table->lock); } else { cpumask_set_cpu(cpu_dev->id, cpumask); } -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus); diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 96a30a032c5f..8fc6238b1728 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf, size_t count, loff_t *ppos) { struct icc_path *path = fp->private_data; + const char *name = icc_get_name(path); char buf[64]; - int i; + int i = 0; - i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path)); + if (name) + i = scnprintf(buf, sizeof(buf), "%.62s\n", name); return simple_read_from_buffer(userbuf, count, ppos, buf, i); } @@ -56,11 +58,11 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp, struct dentry *pdentry) { struct dentry *d; - char name[11]; + char name[] = "icc-path-XXXXXXXXXXX"; /* Integers can take 11 chars max */ int i; for (i = 0; i < opp_table->path_count; i++) { - snprintf(name, sizeof(name), "icc-path-%.1d", i); + snprintf(name, sizeof(name), "icc-path-%d", i); /* Create per-path directory */ d = debugfs_create_dir(name, pdentry); @@ -78,7 +80,7 @@ static void opp_debug_create_clks(struct dev_pm_opp *opp, struct opp_table *opp_table, struct dentry *pdentry) { - char name[12]; + char name[] = "rate_hz_XXXXXXXXXXX"; /* Integers can take 11 chars max */ int i; if (opp_table->clk_count == 1) { @@ -100,7 +102,7 @@ static void opp_debug_create_supplies(struct dev_pm_opp *opp, int i; for (i = 0; i < opp_table->regulator_count; i++) { - char 
name[15]; + char name[] = "supply-XXXXXXXXXXX"; /* Integers can take 11 chars max */ snprintf(name, sizeof(name), "supply-%d", i); @@ -152,7 +154,6 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic); debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo); debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend); - debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate); debugfs_create_u32("level", S_IRUGO, d, &opp->level); debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, &opp->clock_latency_ns); @@ -216,7 +217,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev, { struct opp_device *new_dev = NULL, *iter; const struct device *dev; - struct dentry *dentry; + int err; /* Look for next opp-dev */ list_for_each_entry(iter, &opp_table->dev_list, node) @@ -233,16 +234,14 @@ static void opp_migrate_dentry(struct opp_device *opp_dev, opp_set_dev_name(dev, opp_table->dentry_name); - dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir, - opp_table->dentry_name); - if (!dentry) { + err = debugfs_change_name(opp_dev->dentry, "%s", opp_table->dentry_name); + if (err) { dev_err(dev, "%s: Failed to rename link from: %s to %s\n", __func__, dev_name(opp_dev->dev), dev_name(dev)); return; } - new_dev->dentry = dentry; - opp_table->dentry = dentry; + new_dev->dentry = opp_table->dentry = opp_dev->dentry; } /** diff --git a/drivers/opp/of.c b/drivers/opp/of.c index e55c6095adf0..1e0d0adb18e1 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -13,7 +13,7 @@ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/device.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pm_domain.h> #include <linux/slab.h> #include <linux/export.h> @@ -21,6 +21,9 @@ #include "opp.h" +/* OPP tables with uninitialized required OPPs, protected by opp_table_lock */ +static LIST_HEAD(lazy_opp_tables); + /* * Returns opp descriptor node for a device node, caller must * do of_node_put(). @@ -42,9 +45,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); struct opp_table *_managed_opp(struct device *dev, int index) { struct opp_table *opp_table, *managed_table = NULL; - struct device_node *np; - np = _opp_of_get_opp_desc_node(dev->of_node, index); + struct device_node *np __free(device_node) = + _opp_of_get_opp_desc_node(dev->of_node, index); + if (!np) return NULL; @@ -57,17 +61,13 @@ struct opp_table *_managed_opp(struct device *dev, int index) * But the OPPs will be considered as shared only if the * OPP table contains a "opp-shared" property. 
*/ - if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { - _get_opp_table_kref(opp_table); - managed_table = opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) + managed_table = dev_pm_opp_get_opp_table_ref(opp_table); break; } } - of_node_put(np); - return managed_table; } @@ -77,18 +77,13 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, { struct dev_pm_opp *opp; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp, &opp_table->opp_list, node) { - if (opp->np == opp_np) { - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); - return opp; - } + if (opp->np == opp_np) + return dev_pm_opp_get(opp); } - mutex_unlock(&opp_table->lock); - return NULL; } @@ -102,26 +97,20 @@ static struct device_node *of_parse_required_opp(struct device_node *np, static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np) { struct opp_table *opp_table; - struct device_node *opp_table_np; - opp_table_np = of_get_parent(opp_np); + struct device_node *opp_table_np __free(device_node) = + of_get_parent(opp_np); + if (!opp_table_np) - goto err; + return ERR_PTR(-ENODEV); - /* It is safe to put the node now as all we need now is its address */ - of_node_put(opp_table_np); + guard(mutex)(&opp_table_lock); - mutex_lock(&opp_table_lock); list_for_each_entry(opp_table, &opp_tables, node) { - if (opp_table_np == opp_table->np) { - _get_opp_table_kref(opp_table); - mutex_unlock(&opp_table_lock); - return opp_table; - } + if (opp_table_np == opp_table->np) + return dev_pm_opp_get_opp_table_ref(opp_table); } - mutex_unlock(&opp_table_lock); -err: return ERR_PTR(-ENODEV); } @@ -145,6 +134,8 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table) opp_table->required_opp_count = 0; opp_table->required_opp_tables = NULL; + + guard(mutex)(&opp_table_lock); list_del(&opp_table->lazy); } @@ -157,65 +148,69 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, struct device_node *opp_np) { struct opp_table **required_opp_tables; - struct device_node *required_np, *np; bool lazy = false; - int count, i; + int count, i, size; /* Traversing the first OPP node is all we need */ - np = of_get_next_available_child(opp_np, NULL); + struct device_node *np __free(device_node) = + of_get_next_available_child(opp_np, NULL); + if (!np) { dev_warn(dev, "Empty OPP table\n"); - return; } count = of_count_phandle_with_args(np, "required-opps", NULL); if (count <= 0) - goto put_np; + return; - required_opp_tables = kcalloc(count, sizeof(*required_opp_tables), - GFP_KERNEL); + size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs); + required_opp_tables = kcalloc(count, size, GFP_KERNEL); if (!required_opp_tables) - goto put_np; + return; opp_table->required_opp_tables = required_opp_tables; + opp_table->required_devs = (void *)(required_opp_tables + count); opp_table->required_opp_count = count; for (i = 0; i < count; i++) { - required_np = of_parse_required_opp(np, i); - if (!required_np) - goto free_required_tables; + struct device_node *required_np __free(device_node) = + of_parse_required_opp(np, i); + + if (!required_np) { + _opp_table_free_required_tables(opp_table); + return; + } required_opp_tables[i] = _find_table_of_opp_np(required_np); - of_node_put(required_np); if (IS_ERR(required_opp_tables[i])) lazy = true; } /* Let's do the linking later on */ - if (lazy) + if (lazy) { + /* + * The OPP table is not held while allocating the table, take it + * now to avoid corruption to the 
lazy_opp_tables list. + */ + guard(mutex)(&opp_table_lock); list_add(&opp_table->lazy, &lazy_opp_tables); - - goto put_np; - -free_required_tables: - _opp_table_free_required_tables(opp_table); -put_np: - of_node_put(np); + } } void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) { - struct device_node *np, *opp_np; + struct device_node *opp_np; u32 val; /* * Only required for backward compatibility with v1 bindings, but isn't * harmful for other cases. And so we do it unconditionally. */ - np = of_node_get(dev->of_node); + struct device_node *np __free(device_node) = of_node_get(dev->of_node); + if (!np) return; @@ -224,13 +219,11 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, of_property_read_u32(np, "voltage-tolerance", &opp_table->voltage_tolerance_v1); - if (of_find_property(np, "#power-domain-cells", NULL)) + if (of_property_present(np, "#power-domain-cells")) opp_table->is_genpd = true; /* Get OPP table node */ opp_np = _opp_of_get_opp_desc_node(np, index); - of_node_put(np); - if (!opp_np) return; @@ -281,24 +274,39 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) of_node_put(opp->np); } +static int _link_required_opps(struct dev_pm_opp *opp, + struct opp_table *required_table, int index) +{ + struct device_node *np __free(device_node) = + of_parse_required_opp(opp->np, index); + + if (unlikely(!np)) + return -ENODEV; + + opp->required_opps[index] = _find_opp_of_np(required_table, np); + if (!opp->required_opps[index]) { + pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", + __func__, opp->np, index); + return -ENODEV; + } + + return 0; +} + /* Populate all required OPPs which are part of "required-opps" list */ static int _of_opp_alloc_required_opps(struct opp_table *opp_table, struct dev_pm_opp *opp) { - struct dev_pm_opp **required_opps; struct opp_table *required_table; - struct device_node *np; int i, ret, count = opp_table->required_opp_count; if (!count) return 0; - required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL); - if (!required_opps) + opp->required_opps = kcalloc(count, sizeof(*opp->required_opps), GFP_KERNEL); + if (!opp->required_opps) return -ENOMEM; - opp->required_opps = required_opps; - for (i = 0; i < count; i++) { required_table = opp_table->required_opp_tables[i]; @@ -306,21 +314,9 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table, if (IS_ERR_OR_NULL(required_table)) continue; - np = of_parse_required_opp(opp->np, i); - if (unlikely(!np)) { - ret = -ENODEV; - goto free_required_opps; - } - - required_opps[i] = _find_opp_of_np(required_table, np); - of_node_put(np); - - if (!required_opps[i]) { - pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", - __func__, opp->np, i); - ret = -ENODEV; + ret = _link_required_opps(opp, required_table, i); + if (ret) goto free_required_opps; - } } return 0; @@ -335,22 +331,13 @@ free_required_opps: static int lazy_link_required_opps(struct opp_table *opp_table, struct opp_table *new_table, int index) { - struct device_node *required_np; struct dev_pm_opp *opp; + int ret; list_for_each_entry(opp, &opp_table->opp_list, node) { - required_np = of_parse_required_opp(opp->np, index); - if (unlikely(!required_np)) - return -ENODEV; - - opp->required_opps[index] = _find_opp_of_np(new_table, required_np); - of_node_put(required_np); - - if (!opp->required_opps[index]) { - pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", - __func__, opp->np, index); - return -ENODEV; - } + ret = 
_link_required_opps(opp, new_table, index); + if (ret) + return ret; } return 0; @@ -360,17 +347,17 @@ static int lazy_link_required_opps(struct opp_table *opp_table, static void lazy_link_required_opp_table(struct opp_table *new_table) { struct opp_table *opp_table, *temp, **required_opp_tables; - struct device_node *required_np, *opp_np, *required_table_np; struct dev_pm_opp *opp; int i, ret; - mutex_lock(&opp_table_lock); + guard(mutex)(&opp_table_lock); list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) { bool lazy = false; /* opp_np can't be invalid here */ - opp_np = of_get_next_available_child(opp_table->np, NULL); + struct device_node *opp_np __free(device_node) = + of_get_next_available_child(opp_table->np, NULL); for (i = 0; i < opp_table->required_opp_count; i++) { required_opp_tables = opp_table->required_opp_tables; @@ -380,11 +367,10 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) continue; /* required_np can't be invalid here */ - required_np = of_parse_required_opp(opp_np, i); - required_table_np = of_get_parent(required_np); - - of_node_put(required_table_np); - of_node_put(required_np); + struct device_node *required_np __free(device_node) = + of_parse_required_opp(opp_np, i); + struct device_node *required_table_np __free(device_node) = + of_get_parent(required_np); /* * Newly added table isn't the required opp-table for @@ -395,8 +381,7 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) continue; } - required_opp_tables[i] = new_table; - _get_opp_table_kref(new_table); + required_opp_tables[i] = dev_pm_opp_get_opp_table_ref(new_table); /* Link OPPs now */ ret = lazy_link_required_opps(opp_table, new_table, i); @@ -407,8 +392,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) } } - of_node_put(opp_np); - /* All required opp-tables found, remove from lazy list */ if (!lazy) { list_del_init(&opp_table->lazy); @@ -417,22 +400,21 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) _required_opps_available(opp, opp_table->required_opp_count); } } - - mutex_unlock(&opp_table_lock); } static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) { - struct device_node *np, *opp_np; + struct device_node *opp_np __free(device_node) = NULL; struct property *prop; if (!opp_table) { - np = of_node_get(dev->of_node); + struct device_node *np __free(device_node) = + of_node_get(dev->of_node); + if (!np) return -ENODEV; opp_np = _opp_of_get_opp_desc_node(np, 0); - of_node_put(np); } else { opp_np = of_node_get(opp_table->np); } @@ -442,16 +424,15 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) return 0; /* Checking only first OPP is sufficient */ - np = of_get_next_available_child(opp_np, NULL); - of_node_put(opp_np); + struct device_node *np __free(device_node) = + of_get_next_available_child(opp_np, NULL); + if (!np) { dev_err(dev, "OPP table empty\n"); return -EINVAL; } prop = of_find_property(np, "opp-peak-kBps", NULL); - of_node_put(np); - if (!prop || !prop->length) return 0; @@ -461,7 +442,7 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) { - struct device_node *np; + struct device_node *np __free(device_node) = of_node_get(dev->of_node); int ret, i, count, num_paths; struct icc_path **paths; @@ -471,15 +452,13 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev, else if (ret <= 0) return ret; - ret = 
0; - - np = of_node_get(dev->of_node); if (!np) return 0; + ret = 0; + count = of_count_phandle_with_args(np, "interconnects", "#interconnect-cells"); - of_node_put(np); if (count < 0) return 0; @@ -497,11 +476,7 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev, for (i = 0; i < num_paths; i++) { paths[i] = of_icc_get_by_index(dev, i); if (IS_ERR(paths[i])) { - ret = PTR_ERR(paths[i]); - if (ret != -EPROBE_DEFER) { - dev_err(dev, "%s: Unable to get path%d: %d\n", - __func__, i, ret); - } + ret = dev_err_probe(dev, PTR_ERR(paths[i]), "%s: Unable to get path%d\n", __func__, i); goto err; } } @@ -536,7 +511,7 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, * an OPP then the OPP should not be enabled as there is * no way to see if the hardware supports it. */ - if (of_find_property(np, "opp-supported-hw", NULL)) + if (of_property_present(np, "opp-supported-hw")) return false; else return true; @@ -920,7 +895,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, ret = _of_opp_alloc_required_opps(opp_table, new_opp); if (ret) - goto free_opp; + goto put_node; if (!of_property_read_u32(np, "clock-latency-ns", &val)) new_opp->clock_latency_ns = val; @@ -929,9 +904,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, if (ret) goto free_required_opps; - if (opp_table->is_genpd) - new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp); - ret = _opp_add(dev, new_opp, opp_table); if (ret) { /* Don't return error for duplicate OPPs */ @@ -973,6 +945,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, free_required_opps: _of_opp_free_required_opps(opp_table, new_opp); +put_node: + of_node_put(np); free_opp: _opp_free(new_opp); @@ -987,15 +961,14 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) struct dev_pm_opp *opp; /* OPP table is already initialized for the device */ - mutex_lock(&opp_table->lock); - if (opp_table->parsed_static_opps) { - opp_table->parsed_static_opps++; - mutex_unlock(&opp_table->lock); - return 0; - } + scoped_guard(mutex, &opp_table->lock) { + if (opp_table->parsed_static_opps) { + opp_table->parsed_static_opps++; + return 0; + } - opp_table->parsed_static_opps = 1; - mutex_unlock(&opp_table->lock); + opp_table->parsed_static_opps = 1; + } /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { @@ -1018,14 +991,6 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) goto remove_static_opp; } - list_for_each_entry(opp, &opp_table->opp_list, node) { - /* Any non-zero performance state would enable the feature */ - if (opp->pstate) { - opp_table->genpd_performance_state = true; - break; - } - } - lazy_link_required_opp_table(opp_table); return 0; @@ -1043,15 +1008,14 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) const __be32 *val; int nr, ret = 0; - mutex_lock(&opp_table->lock); - if (opp_table->parsed_static_opps) { - opp_table->parsed_static_opps++; - mutex_unlock(&opp_table->lock); - return 0; - } + scoped_guard(mutex, &opp_table->lock) { + if (opp_table->parsed_static_opps) { + opp_table->parsed_static_opps++; + return 0; + } - opp_table->parsed_static_opps = 1; - mutex_unlock(&opp_table->lock); + opp_table->parsed_static_opps = 1; + } prop = of_find_property(dev->of_node, "operating-points", NULL); if (!prop) { @@ -1078,11 +1042,15 @@ static int _of_add_opp_table_v1(struct device *dev, struct 
opp_table *opp_table) while (nr) { unsigned long freq = be32_to_cpup(val++) * 1000; unsigned long volt = be32_to_cpup(val++); + struct dev_pm_opp_data data = { + .freq = freq, + .u_volt = volt, + }; - ret = _opp_add_v1(opp_table, dev, freq, volt, false); + ret = _opp_add_v1(opp_table, dev, &data, false); if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", - __func__, freq, ret); + __func__, data.freq, ret); goto remove_static_opp; } nr -= 2; @@ -1305,11 +1273,12 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - struct device_node *np, *tmp_np, *cpu_np; - int cpu, ret = 0; + int cpu; /* Get OPP descriptor node */ - np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + struct device_node *np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!np) { dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__); return -ENOENT; @@ -1319,39 +1288,36 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, /* OPPs are shared ? */ if (!of_property_read_bool(np, "opp-shared")) - goto put_cpu_node; + return 0; for_each_possible_cpu(cpu) { if (cpu == cpu_dev->id) continue; - cpu_np = of_cpu_device_node_get(cpu); + struct device_node *cpu_np __free(device_node) = + of_cpu_device_node_get(cpu); + if (!cpu_np) { dev_err(cpu_dev, "%s: failed to get cpu%d node\n", __func__, cpu); - ret = -ENOENT; - goto put_cpu_node; + return -ENOENT; } /* Get OPP descriptor node */ - tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0); - of_node_put(cpu_np); + struct device_node *tmp_np __free(device_node) = + _opp_of_get_opp_desc_node(cpu_np, 0); + if (!tmp_np) { pr_err("%pOF: Couldn't find opp node\n", cpu_np); - ret = -ENOENT; - goto put_cpu_node; + return -ENOENT; } /* CPUs are sharing opp node */ if (np == tmp_np) cpumask_set_cpu(cpu, cpumask); - - of_node_put(tmp_np); } -put_cpu_node: - of_node_put(np); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); @@ -1368,38 +1334,79 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); */ int of_get_required_opp_performance_state(struct device_node *np, int index) { - struct dev_pm_opp *opp; - struct device_node *required_np; - struct opp_table *opp_table; int pstate = -EINVAL; - required_np = of_parse_required_opp(np, index); + struct device_node *required_np __free(device_node) = + of_parse_required_opp(np, index); + if (!required_np) return -ENODEV; - opp_table = _find_table_of_opp_np(required_np); + struct opp_table *opp_table __free(put_opp_table) = + _find_table_of_opp_np(required_np); + if (IS_ERR(opp_table)) { pr_err("%s: Failed to find required OPP table %pOF: %ld\n", __func__, np, PTR_ERR(opp_table)); - goto put_required_np; + return PTR_ERR(opp_table); } - opp = _find_opp_of_np(opp_table, required_np); - if (opp) { - pstate = opp->pstate; - dev_pm_opp_put(opp); + /* The OPP tables must belong to a genpd */ + if (unlikely(!opp_table->is_genpd)) { + pr_err("%s: Performance state is only valid for genpds.\n", __func__); + return -EINVAL; } - dev_pm_opp_put_opp_table(opp_table); + struct dev_pm_opp *opp __free(put_opp) = + _find_opp_of_np(opp_table, required_np); -put_required_np: - of_node_put(required_np); + if (opp) { + if (opp->level == OPP_LEVEL_UNSET) { + pr_err("%s: OPP levels aren't available for %pOF\n", + __func__, np); + } else { + pstate = opp->level; + } + } return pstate; } EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state); /** + * dev_pm_opp_of_has_required_opp - Find out if a required-opps exists. 
+ * @dev: The device to investigate. + * + * Returns true if the device's node has a "operating-points-v2" property and if + * the corresponding node for the opp-table describes opp nodes that uses the + * "required-opps" property. + * + * Return: True if a required-opps is present, else false. + */ +bool dev_pm_opp_of_has_required_opp(struct device *dev) +{ + int count; + + struct device_node *opp_np __free(device_node) = + _opp_of_get_opp_desc_node(dev->of_node, 0); + + if (!opp_np) + return false; + + struct device_node *np __free(device_node) = + of_get_next_available_child(opp_np, NULL); + + if (!np) { + dev_warn(dev, "Empty OPP table\n"); + return false; + } + + count = of_count_phandle_with_args(np, "required-opps", NULL); + + return count > 0; +} + +/** * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp * @opp: opp for which DT node has to be returned for * @@ -1430,17 +1437,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); static int __maybe_unused _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) { - struct dev_pm_opp *opp; unsigned long opp_freq, opp_power; /* Find the right frequency and related OPP */ opp_freq = *kHz * 1000; - opp = dev_pm_opp_find_freq_ceil(dev, &opp_freq); + + struct dev_pm_opp *opp __free(put_opp) = + dev_pm_opp_find_freq_ceil(dev, &opp_freq); + if (IS_ERR(opp)) return -EINVAL; opp_power = dev_pm_opp_get_power(opp); - dev_pm_opp_put(opp); if (!opp_power) return -EINVAL; @@ -1450,44 +1458,50 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) return 0; } -/* - * Callback function provided to the Energy Model framework upon registration. +/** + * dev_pm_opp_calc_power() - Calculate power value for device with EM + * @dev : Device for which an Energy Model has to be registered + * @uW : New power value that is calculated + * @kHz : Frequency for which the new power is calculated + * * This computes the power estimated by @dev at @kHz if it is the frequency * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled * frequency and @uW to the associated power. The power is estimated as * P = C * V^2 * f with C being the device's capacitance and V and f * respectively the voltage and frequency of the OPP. + * It is also used as a callback function provided to the Energy Model + * framework upon registration. * * Returns -EINVAL if the power calculation failed because of missing * parameters, 0 otherwise. 
*/ -static int __maybe_unused _get_power(struct device *dev, unsigned long *uW, - unsigned long *kHz) +int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, + unsigned long *kHz) { - struct dev_pm_opp *opp; - struct device_node *np; unsigned long mV, Hz; u32 cap; u64 tmp; int ret; - np = of_node_get(dev->of_node); + struct device_node *np __free(device_node) = of_node_get(dev->of_node); + if (!np) return -EINVAL; ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); - of_node_put(np); if (ret) return -EINVAL; Hz = *kHz * 1000; - opp = dev_pm_opp_find_freq_ceil(dev, &Hz); + + struct dev_pm_opp *opp __free(put_opp) = + dev_pm_opp_find_freq_ceil(dev, &Hz); + if (IS_ERR(opp)) return -EINVAL; mV = dev_pm_opp_get_voltage(opp) / 1000; - dev_pm_opp_put(opp); if (!mV) return -EINVAL; @@ -1500,23 +1514,20 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *uW, return 0; } +EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power); static bool _of_has_opp_microwatt_property(struct device *dev) { - unsigned long power, freq = 0; - struct dev_pm_opp *opp; + unsigned long freq = 0; /* Check if at least one OPP has needed property */ - opp = dev_pm_opp_find_freq_ceil(dev, &freq); - if (IS_ERR(opp)) - return false; + struct dev_pm_opp *opp __free(put_opp) = + dev_pm_opp_find_freq_ceil(dev, &freq); - power = dev_pm_opp_get_power(opp); - dev_pm_opp_put(opp); - if (!power) + if (IS_ERR(opp)) return false; - return true; + return !!dev_pm_opp_get_power(opp); } /** @@ -1533,11 +1544,15 @@ static bool _of_has_opp_microwatt_property(struct device *dev) int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) { struct em_data_callback em_cb; - struct device_node *np; int ret, nr_opp; u32 cap; - if (IS_ERR_OR_NULL(dev)) { + if (IS_ERR_OR_NULL(dev)) + return -EINVAL; + + struct device_node *np __free(device_node) = of_node_get(dev->of_node); + + if (!np) { ret = -EINVAL; goto failed; } @@ -1554,12 +1569,6 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) goto register_em; } - np = of_node_get(dev->of_node); - if (!np) { - ret = -EINVAL; - goto failed; - } - /* * Register an EM only if the 'dynamic-power-coefficient' property is * set in devicetree. It is assumed the voltage values are known if that @@ -1568,14 +1577,13 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) * user about the inconsistent configuration. 
*/ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); - of_node_put(np); if (ret || !cap) { dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n"); ret = -EINVAL; goto failed; } - EM_SET_ACTIVE_POWER_CB(em_cb, _get_power); + EM_SET_ACTIVE_POWER_CB(em_cb, dev_pm_opp_calc_power); register_em: ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 3a6e077df386..9eba63e01a9e 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -26,7 +26,7 @@ struct regulator; /* Lock to allow exclusive modification to the device and opp lists */ extern struct mutex opp_table_lock; -extern struct list_head opp_tables, lazy_opp_tables; +extern struct list_head opp_tables; /* OPP Config flags */ #define OPP_CONFIG_CLK BIT(0) @@ -34,12 +34,13 @@ extern struct list_head opp_tables, lazy_opp_tables; #define OPP_CONFIG_REGULATOR_HELPER BIT(2) #define OPP_CONFIG_PROP_NAME BIT(3) #define OPP_CONFIG_SUPPORTED_HW BIT(4) -#define OPP_CONFIG_GENPD BIT(5) +#define OPP_CONFIG_REQUIRED_DEV BIT(5) /** * struct opp_config_data - data for set config operations * @opp_table: OPP table * @flags: OPP config flags + * @required_dev_index: The position in the array of required_devs * * This structure stores the OPP config information for each OPP table * configuration by the callers. @@ -47,6 +48,19 @@ extern struct list_head opp_tables, lazy_opp_tables; struct opp_config_data { struct opp_table *opp_table; unsigned int flags; + unsigned int required_dev_index; +}; + +/** + * struct dev_pm_opp_icc_bw - Interconnect bandwidth values + * @avg: Average bandwidth corresponding to this OPP (in icc units) + * @peak: Peak bandwidth corresponding to this OPP (in icc units) + * + * This structure stores the bandwidth values for a single interconnect path. + */ +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; }; /* @@ -78,7 +92,6 @@ struct opp_config_data { * @turbo: true if turbo (boost) OPP * @suspend: true if suspend OPP * @removed: flag indicating that OPP's reference is dropped by OPP core. - * @pstate: Device's power domain's performance state. * @rates: Frequencies in hertz * @level: Performance level * @supplies: Power supplies voltage/current values @@ -101,7 +114,6 @@ struct dev_pm_opp { bool turbo; bool suspend; bool removed; - unsigned int pstate; unsigned long *rates; unsigned int level; @@ -159,13 +171,12 @@ enum opp_table_access { * @clock_latency_ns_max: Max clock latency in nanoseconds. * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. - * @rate_clk_single: Currently configured frequency for single clk. + * @current_rate_single_clk: Currently configured frequency for single clk. * @current_opp: Currently configured OPP for the table. * @suspend_opp: Pointer to OPP to be used during device suspend. - * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. - * @genpd_virt_devs: List of virtual devices for multiple genpd support. * @required_opp_tables: List of device OPP tables that are required by OPPs in * this table. + * @required_devs: List of devices for required OPP tables. * @required_opp_count: Number of required devices. * @supported_hw: Array of version number to support. * @supported_hw_count: Number of elements in supported_hw array. 
@@ -182,7 +193,6 @@ enum opp_table_access { * @paths: Interconnect path handles * @path_count: Number of interconnect paths * @enabled: Set to true if the device's resources are enabled/configured. - * @genpd_performance_state: Device's power domain support performance state. * @is_genpd: Marks if the OPP table belongs to a genpd. * @dentry: debugfs dentry pointer of the real device directory (not links). * @dentry_name: Name of the real dentry. @@ -210,13 +220,12 @@ struct opp_table { unsigned int parsed_static_opps; enum opp_table_access shared_opp; - unsigned long rate_clk_single; + unsigned long current_rate_single_clk; struct dev_pm_opp *current_opp; struct dev_pm_opp *suspend_opp; - struct mutex genpd_virt_dev_lock; - struct device **genpd_virt_devs; struct opp_table **required_opp_tables; + struct device **required_devs; unsigned int required_opp_count; unsigned int *supported_hw; @@ -232,7 +241,6 @@ struct opp_table { struct icc_path **paths; unsigned int path_count; bool enabled; - bool genpd_performance_state; bool is_genpd; #ifdef CONFIG_DEBUG_FS @@ -242,9 +250,7 @@ struct opp_table { }; /* Routines internal to opp core */ -void dev_pm_opp_get(struct dev_pm_opp *opp); bool _opp_remove_all_static(struct opp_table *opp_table); -void _get_opp_table_kref(struct opp_table *opp_table); int _get_opp_count(struct opp_table *opp_table); struct opp_table *_find_opp_table(struct device *dev); struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); @@ -252,10 +258,9 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table); void _opp_free(struct dev_pm_opp *opp); int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, struct dev_pm_opp *opp2); int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); -int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic); +int _opp_add_v1(struct opp_table *opp_table, struct device *dev, struct dev_pm_opp_data *data, bool dynamic); void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu); struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk); -void _put_opp_list_kref(struct opp_table *opp_table); void _required_opps_available(struct dev_pm_opp *opp, int count); static inline bool lazy_linking_pending(struct opp_table *opp_table) diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 8f3f13fbbb25..5f0fb3ea385b 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> @@ -373,23 +374,15 @@ static int ti_opp_supply_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device *cpu_dev = get_cpu_device(0); - const struct of_device_id *match; const struct ti_opp_supply_of_data *of_data; int ret = 0; - match = of_match_device(ti_opp_supply_of_match, dev); - if (!match) { - /* We do not expect this to happen */ - dev_err(dev, "%s: Unable to match device\n", __func__); - return -ENODEV; - } - if (!match->data) { + of_data = device_get_match_data(dev); + if (!of_data) { /* Again, unlikely.. 
but mistakes do happen */ dev_err(dev, "%s: Bad data in match\n", __func__); return -EINVAL; } - of_data = match->data; - dev_set_drvdata(dev, (void *)of_data); /* If we need optimized voltage */ @@ -400,17 +393,19 @@ static int ti_opp_supply_probe(struct platform_device *pdev) } ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators); - if (ret < 0) + if (ret < 0) { _free_optimized_voltages(dev, &opp_data); + return ret; + } - return ret; + return 0; } static struct platform_driver ti_opp_supply_driver = { .probe = ti_opp_supply_probe, .driver = { .name = "ti_opp_supply", - .of_match_table = of_match_ptr(ti_opp_supply_of_match), + .of_match_table = ti_opp_supply_of_match, }, }; module_platform_driver(ti_opp_supply_driver); |
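
The bulk of the changes above convert manual mutex_lock()/mutex_unlock() pairs and explicit reference drops into the kernel's scope-based cleanup helpers (guard(), scoped_guard() and __free() from <linux/cleanup.h>), which is what removes the goto-based unwind labels. A minimal sketch of that pattern, using a hypothetical foo_table type rather than the real OPP structures:

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* Hypothetical refcounted table, standing in for struct opp_table. */
struct foo_table {
	struct mutex lock;
	struct list_head entries;
};

struct foo_table *foo_find_table(struct device *dev);
void foo_table_put(struct foo_table *table);

/* Teach __free() how to drop the reference when the pointer leaves scope. */
DEFINE_FREE(foo_table_put, struct foo_table *,
	    if (!IS_ERR_OR_NULL(_T)) foo_table_put(_T))

static int foo_update(struct device *dev)
{
	/* The reference is released automatically on every return path. */
	struct foo_table *table __free(foo_table_put) = foo_find_table(dev);

	if (IS_ERR(table))
		return PTR_ERR(table);

	/* The lock is held only for this block and dropped on any exit. */
	scoped_guard(mutex, &table->lock) {
		if (list_empty(&table->entries))
			return -ENODEV;	/* implicit unlock, then implicit put */
	}

	return 0;
}

The early returns replace the unlock/put_table labels seen in the removed hunks; guard(mutex)(&lock) is the statement form used where the lock stays held for the remainder of the function.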
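
With the two-argument dynamic-OPP helper replaced by dev_pm_opp_add_dynamic(), callers describe the OPP through struct dev_pm_opp_data instead of passing a frequency and voltage directly. A short illustrative caller, assuming only the .freq and .u_volt members exercised in this diff (the device and the values are made up):

#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_register_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.freq = 1200000000,	/* 1.2 GHz */
		.u_volt = 1100000,	/* 1.1 V */
	};

	/* Adds a single dynamically discovered OPP for 'dev'. */
	return dev_pm_opp_add_dynamic(dev, &data);
}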
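
The ti-opp-supply probe now uses device_get_match_data() rather than of_match_device(), which hands back the .data pointer of the matching of_device_id entry directly and lets the of_match_ptr() wrapper go away. A minimal sketch of the same idiom in a generic platform driver (the compatible string, data type and driver name are illustrative, not taken from the patch):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_match_data {
	bool needs_optimized_voltage;
};

static const struct example_match_data example_data = {
	.needs_optimized_voltage = true,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-supply", .data = &example_data },
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	const struct example_match_data *of_data;

	/* Looks up the matching of_device_id and returns its .data member. */
	of_data = device_get_match_data(&pdev->dev);
	if (!of_data)
		return -EINVAL;

	dev_set_drvdata(&pdev->dev, (void *)of_data);
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-supply",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");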
