Diffstat (limited to 'drivers/pmdomain/core.c')
-rw-r--r-- | drivers/pmdomain/core.c | 477
1 files changed, 354 insertions, 123 deletions
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 18e232b5ed53..6c94137865c9 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) "PM: " fmt

 #include <linux/delay.h>
+#include <linux/idr.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
@@ -23,6 +24,9 @@
 #include <linux/cpu.h>
 #include <linux/debugfs.h>

+/* Provides a unique ID for each genpd device */
+static DEFINE_IDA(genpd_ida);
+
 #define GENPD_RETRY_MAX_MS	250		/* Approximate */

 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
@@ -117,6 +121,48 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 	.unlock = genpd_unlock_spin,
 };

+static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->raw_slock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+	genpd->raw_lock_flags = flags;
+}
+
+static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
+				       int depth)
+	__acquires(&genpd->raw_slock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
+	genpd->raw_lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->raw_slock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+	genpd->raw_lock_flags = flags;
+	return 0;
+}
+
+static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
+	__releases(&genpd->raw_slock)
+{
+	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_raw_spin_ops = {
+	.lock = genpd_lock_raw_spin,
+	.lock_nested = genpd_lock_nested_raw_spin,
+	.lock_interruptible = genpd_lock_interruptible_raw_spin,
+	.unlock = genpd_unlock_raw_spin,
+};
+
 #define genpd_lock(p)			p->lock_ops->lock(p)
 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
@@ -129,6 +175,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
+#define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)

 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
 		const struct generic_pm_domain *genpd)
@@ -147,7 +194,7 @@ static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,

 	if (ret)
 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
-				genpd->name);
+				dev_name(&genpd->dev));

 	return ret;
 }
@@ -184,6 +231,16 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }

+struct device *dev_to_genpd_dev(struct device *dev)
+{
+	struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+	if (IS_ERR(genpd))
+		return ERR_CAST(genpd);
+
+	return &genpd->dev;
+}
+
 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
 			  struct device *dev)
 {
@@ -222,7 +279,7 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd)
 	if (!genpd_debugfs_dir)
 		return;

-	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
+	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
 }

 static void genpd_update_accounting(struct generic_pm_domain *genpd)
@@ -311,72 +368,102 @@ static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
 }

 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
-					unsigned int state, int depth)
+					unsigned int state, int depth);
+
+static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
 {
-	struct generic_pm_domain *parent;
-	struct gpd_link *link;
-	int parent_state, ret;
+	struct generic_pm_domain *parent = link->parent;
+	int parent_state;

-	if (state == genpd->performance_state)
-		return 0;
+	genpd_lock_nested(parent, depth + 1);

-	/* Propagate to parents of genpd */
-	list_for_each_entry(link, &genpd->child_links, child_node) {
-		parent = link->parent;
+	parent_state = link->prev_performance_state;
+	link->performance_state = parent_state;

-		/* Find parent's performance state */
-		ret = genpd_xlate_performance_state(genpd, parent, state);
-		if (unlikely(ret < 0))
-			goto err;
+	parent_state = _genpd_reeval_performance_state(parent, parent_state);
+	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
+		pr_err("%s: Failed to roll back to %d performance state\n",
+		       parent->name, parent_state);
+	}

-		parent_state = ret;
+	genpd_unlock(parent);
+}

-		genpd_lock_nested(parent, depth + 1);
+static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
+				   struct gpd_link *link,
+				   unsigned int state, int depth)
+{
+	struct generic_pm_domain *parent = link->parent;
+	int parent_state, ret;

-		link->prev_performance_state = link->performance_state;
-		link->performance_state = parent_state;
-		parent_state = _genpd_reeval_performance_state(parent,
-						parent_state);
-		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
-		if (ret)
-			link->performance_state = link->prev_performance_state;
+	/* Find parent's performance state */
+	ret = genpd_xlate_performance_state(genpd, parent, state);
+	if (unlikely(ret < 0))
+		return ret;

-		genpd_unlock(parent);
+	parent_state = ret;

-		if (ret)
-			goto err;
-	}
+	genpd_lock_nested(parent, depth + 1);

-	if (genpd->set_performance_state) {
-		ret = genpd->set_performance_state(genpd, state);
-		if (ret)
-			goto err;
-	}
+	link->prev_performance_state = link->performance_state;
+	link->performance_state = parent_state;

-	genpd->performance_state = state;
-	return 0;
+	parent_state = _genpd_reeval_performance_state(parent, parent_state);
+	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
+	if (ret)
+		link->performance_state = link->prev_performance_state;

-err:
-	/* Encountered an error, lets rollback */
-	list_for_each_entry_continue_reverse(link, &genpd->child_links,
-					     child_node) {
-		parent = link->parent;
+	genpd_unlock(parent);

-		genpd_lock_nested(parent, depth + 1);
+	return ret;
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+					unsigned int state, int depth)
+{
+	struct gpd_link *link = NULL;
+	int ret;

-		parent_state = link->prev_performance_state;
-		link->performance_state = parent_state;
+	if (state == genpd->performance_state)
+		return 0;

-		parent_state = _genpd_reeval_performance_state(parent,
-						parent_state);
-		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
-			pr_err("%s: Failed to roll back to %d performance state\n",
-			       parent->name, parent_state);
+	/* When scaling up, propagate to parents first in normal order */
+	if (state > genpd->performance_state) {
+		list_for_each_entry(link, &genpd->child_links, child_node) {
+			ret = _genpd_set_parent_state(genpd, link, state, depth);
+			if (ret)
+				goto rollback_parents_up;
 		}
+	}

-		genpd_unlock(parent);
+	if (genpd->set_performance_state) {
+		ret = genpd->set_performance_state(genpd, state);
+		if (ret) {
+			if (link)
+				goto rollback_parents_up;
+			return ret;
+		}
+	}
+
+	/* When scaling down, propagate to parents last in reverse order */
+	if (state < genpd->performance_state) {
+		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
+			ret = _genpd_set_parent_state(genpd, link, state, depth);
+			if (ret)
+				goto rollback_parents_down;
+		}
 	}

+	genpd->performance_state = state;
+	return 0;
+
+rollback_parents_up:
+	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
+		_genpd_rollback_parent_state(link, depth);
+	return ret;
+rollback_parents_down:
+	list_for_each_entry_continue(link, &genpd->child_links, child_node)
+		_genpd_rollback_parent_state(link, depth);
 	return ret;
 }

@@ -548,6 +635,68 @@ void dev_pm_genpd_synced_poweroff(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);

+/**
+ * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
+ *
+ * @dev: Device for which the HW-mode should be changed.
+ * @enable: Value to set or unset the HW-mode.
+ *
+ * Some PM domains can rely on HW signals to control the power for a device. To
+ * allow a consumer driver to switch the behaviour for its device in runtime,
+ * which may be beneficial from a latency or energy point of view, this function
+ * may be called.
+ *
+ * It is assumed that the users guarantee that the genpd wouldn't be detached
+ * while this routine is getting called.
+ *
+ * Return: Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
+{
+	struct generic_pm_domain *genpd;
+	int ret = 0;
+
+	genpd = dev_to_genpd_safe(dev);
+	if (!genpd)
+		return -ENODEV;
+
+	if (!genpd->set_hwmode_dev)
+		return -EOPNOTSUPP;
+
+	genpd_lock(genpd);
+
+	if (dev_gpd_data(dev)->hw_mode == enable)
+		goto out;
+
+	ret = genpd->set_hwmode_dev(genpd, dev, enable);
+	if (!ret)
+		dev_gpd_data(dev)->hw_mode = enable;
+
+out:
+	genpd_unlock(genpd);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
+
+/**
+ * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
+ *
+ * @dev: Device for which the current HW-mode setting should be fetched.
+ *
+ * This helper function allows consumer drivers to fetch the current HW mode
+ * setting of its the device.
+ *
+ * It is assumed that the users guarantee that the genpd wouldn't be detached
+ * while this routine is getting called.
+ *
+ * Return: Returns the HW mode setting of device from SW cached hw_mode.
+ */
+bool dev_pm_genpd_get_hwmode(struct device *dev)
+{
+	return dev_gpd_data(dev)->hw_mode;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
+
 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
 	unsigned int state_idx = genpd->state_idx;
@@ -587,7 +736,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
 	genpd->gd->max_off_time_changed = true;
 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
-		 genpd->name, "on", elapsed_ns);
+		 dev_name(&genpd->dev), "on", elapsed_ns);

 out:
 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
@@ -638,7 +787,7 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
 	genpd->gd->max_off_time_changed = true;
 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
-		 genpd->name, "off", elapsed_ns);
+		 dev_name(&genpd->dev), "off", elapsed_ns);

 out:
 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
@@ -1100,6 +1249,7 @@ static int __init genpd_power_off_unused(void)
 		return 0;
 	}

+	pr_info("genpd: Disabling unused power domains\n");
 	mutex_lock(&gpd_list_lock);

 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
@@ -1147,8 +1297,12 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,

 	/* Choose the deepest state when suspending */
 	genpd->state_idx = genpd->state_count - 1;
-	if (_genpd_power_off(genpd, false))
+	if (_genpd_power_off(genpd, false)) {
+		genpd->states[genpd->state_idx].rejected++;
 		return;
+	} else {
+		genpd->states[genpd->state_idx].usage++;
+	}

 	genpd->status = GENPD_STATE_OFF;

@@ -1220,10 +1374,7 @@ static int genpd_prepare(struct device *dev)
 		return -EINVAL;

 	genpd_lock(genpd);
-
-	if (genpd->prepared_count++ == 0)
-		genpd->suspended_count = 0;
-
+	genpd->prepared_count++;
 	genpd_unlock(genpd);

 	ret = pm_generic_prepare(dev);
@@ -1576,6 +1727,7 @@ static void genpd_free_dev_data(struct device *dev,

 	spin_unlock_irq(&dev->power.lock);

+	dev_pm_opp_clear_config(gpd_data->opp_token);
 	kfree(gpd_data->td);
 	kfree(gpd_data);
 	dev_pm_put_subsys_data(dev);
@@ -1645,6 +1797,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,

 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

+	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;
+
 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
 	if (ret)
 		goto out;
@@ -1652,7 +1806,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	genpd_lock(genpd);

 	genpd_set_cpumask(genpd, gpd_data->cpu);
-	dev_pm_domain_set(dev, &genpd->domain);

 	genpd->device_count++;
 	if (gd)
@@ -1661,6 +1814,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 	genpd_unlock(genpd);
+	dev_pm_domain_set(dev, &genpd->domain);
  out:
 	if (ret)
 		genpd_free_dev_data(dev, gpd_data);
@@ -1717,12 +1871,13 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 		genpd->gd->max_off_time_changed = true;

 	genpd_clear_cpumask(genpd, gpd_data->cpu);
-	dev_pm_domain_set(dev, NULL);

 	list_del_init(&pdd->list_node);

 	genpd_unlock(genpd);

+	dev_pm_domain_set(dev, NULL);
+
 	if (genpd->detach_dev)
 		genpd->detach_dev(genpd, dev);

@@ -1791,7 +1946,7 @@ int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)

 	if (ret) {
 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
-			 genpd->name);
+			 dev_name(&genpd->dev));
 		return ret;
 	}

@@ -1838,7 +1993,7 @@ int dev_pm_genpd_remove_notifier(struct device *dev)

 	if (ret) {
 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
-			 genpd->name);
+			 dev_name(&genpd->dev));
 		return ret;
 	}

@@ -1864,7 +2019,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
 	 */
 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
-		     genpd->name, subdomain->name);
+		     dev_name(&genpd->dev), subdomain->name);
 		return -EINVAL;
 	}

@@ -1939,7 +2094,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,

 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
 		pr_warn("%s: unable to remove subdomain %s\n",
-			genpd->name, subdomain->name);
+			dev_name(&genpd->dev), subdomain->name);
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1987,6 +2142,11 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
 	return 0;
 }

+static void genpd_provider_release(struct device *dev)
+{
+	/* nothing to be done here */
+}
+
 static int genpd_alloc_data(struct generic_pm_domain *genpd)
 {
 	struct genpd_governor_data *gd = NULL;
@@ -2017,8 +2177,25 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
 	}

 	genpd->gd = gd;
-	return 0;

+	device_initialize(&genpd->dev);
+	genpd->dev.release = genpd_provider_release;
+	if (!genpd_is_dev_name_fw(genpd)) {
+		dev_set_name(&genpd->dev, "%s", genpd->name);
+	} else {
+		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
+		if (ret < 0)
+			goto put;
+
+		genpd->device_id = ret;
+		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
+	}
+
+	return 0;
+put:
+	put_device(&genpd->dev);
+	if (genpd->free_states == genpd_free_default_power_state)
+		kfree(genpd->states);
 free:
 	if (genpd_is_cpu_domain(genpd))
 		free_cpumask_var(genpd->cpus);
@@ -2028,6 +2205,9 @@ free:

 static void genpd_free_data(struct generic_pm_domain *genpd)
 {
+	put_device(&genpd->dev);
+	if (genpd->device_id != -ENXIO)
+		ida_free(&genpd_ida, genpd->device_id);
 	if (genpd_is_cpu_domain(genpd))
 		free_cpumask_var(genpd->cpus);
 	if (genpd->free_states)
@@ -2037,7 +2217,10 @@ static void genpd_free_data(struct generic_pm_domain *genpd)

 static void genpd_lock_init(struct generic_pm_domain *genpd)
 {
-	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+	if (genpd_is_cpu_domain(genpd)) {
+		raw_spin_lock_init(&genpd->raw_slock);
+		genpd->lock_ops = &genpd_raw_spin_ops;
+	} else if (genpd_is_irq_safe(genpd)) {
 		spin_lock_init(&genpd->slock);
 		genpd->lock_ops = &genpd_spin_ops;
 	} else {
@@ -2073,6 +2256,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
 	genpd->device_count = 0;
 	genpd->provider = NULL;
+	genpd->device_id = -ENXIO;
 	genpd->has_provider = false;
 	genpd->accounting_time = ktime_get_mono_fast_ns();
 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
@@ -2112,9 +2296,6 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 	if (ret)
 		return ret;

-	device_initialize(&genpd->dev);
-	dev_set_name(&genpd->dev, "%s", genpd->name);
-
 	mutex_lock(&gpd_list_lock);
 	list_add(&genpd->gpd_list_node, &gpd_list);
 	mutex_unlock(&gpd_list_lock);
@@ -2135,13 +2316,13 @@ static int genpd_remove(struct generic_pm_domain *genpd)

 	if (genpd->has_provider) {
 		genpd_unlock(genpd);
-		pr_err("Provider present, unable to remove %s\n", genpd->name);
+		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
 		return -EBUSY;
 	}

 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
 		genpd_unlock(genpd);
-		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
+		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
 		return -EBUSY;
 	}

@@ -2157,7 +2338,7 @@ static int genpd_remove(struct generic_pm_domain *genpd)
 	cancel_work_sync(&genpd->power_off_work);
 	genpd_free_data(genpd);

-	pr_debug("%s: removed %s\n", __func__, genpd->name);
+	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));

 	return 0;
 }
@@ -2235,7 +2416,7 @@ static DEFINE_MUTEX(of_genpd_mutex);
 * to be a valid pointer to struct generic_pm_domain.
 */
 static struct generic_pm_domain *genpd_xlate_simple(
-					struct of_phandle_args *genpdspec,
+					const struct of_phandle_args *genpdspec,
 					void *data)
 {
 	return data;
 }
@@ -2252,7 +2433,7 @@ static struct generic_pm_domain *genpd_xlate_simple(
 * the genpd_onecell_data struct when registering the provider.
 */
 static struct generic_pm_domain *genpd_xlate_onecell(
-					struct of_phandle_args *genpdspec,
+					const struct of_phandle_args *genpdspec,
 					void *data)
 {
 	struct genpd_onecell_data *genpd_data = data;
@@ -2495,7 +2676,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
 * on failure.
 */
 static struct generic_pm_domain *genpd_get_from_provider(
-					struct of_phandle_args *genpdspec)
+					const struct of_phandle_args *genpdspec)
 {
 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
 	struct of_genpd_provider *provider;
@@ -2526,7 +2707,7 @@ static struct generic_pm_domain *genpd_get_from_provider(
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
-int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
+int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
 {
 	struct generic_pm_domain *genpd;
 	int ret;
@@ -2560,8 +2741,8 @@ EXPORT_SYMBOL_GPL(of_genpd_add_device);
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
-int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
-			   struct of_phandle_args *subdomain_spec)
+int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
+			   const struct of_phandle_args *subdomain_spec)
 {
 	struct generic_pm_domain *parent, *subdomain;
 	int ret;
@@ -2598,8 +2779,8 @@ EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
-int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
-			      struct of_phandle_args *subdomain_spec)
+int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
+			      const struct of_phandle_args *subdomain_spec)
 {
 	struct generic_pm_domain *parent, *subdomain;
 	int ret;
@@ -2732,12 +2913,58 @@ static void genpd_dev_pm_sync(struct device *dev)
 	genpd_queue_power_off_work(pd);
 }

+static int genpd_set_required_opp_dev(struct device *dev,
+				      struct device *base_dev)
+{
+	struct dev_pm_opp_config config = {
+		.required_dev = dev,
+	};
+	int ret;
+
+	/* Limit support to non-providers for now. */
+	if (of_property_present(base_dev->of_node, "#power-domain-cells"))
+		return 0;
+
+	if (!dev_pm_opp_of_has_required_opp(base_dev))
+		return 0;
+
+	ret = dev_pm_opp_set_config(base_dev, &config);
+	if (ret < 0)
+		return ret;
+
+	dev_gpd_data(dev)->opp_token = ret;
+	return 0;
+}
+
+static int genpd_set_required_opp(struct device *dev, unsigned int index)
+{
+	int ret, pstate;
+
+	/* Set the default performance state */
+	pstate = of_get_required_opp_performance_state(dev->of_node, index);
+	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
+		ret = pstate;
+		goto err;
+	} else if (pstate > 0) {
+		ret = dev_pm_genpd_set_performance_state(dev, pstate);
+		if (ret)
+			goto err;
+		dev_gpd_data(dev)->default_pstate = pstate;
+	}
+
+	return 0;
+err:
+	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
+		dev_to_genpd(dev)->name, ret);
+	return ret;
+}
+
 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
-				 unsigned int index, bool power_on)
+				 unsigned int index, unsigned int num_domains,
+				 bool power_on)
 {
 	struct of_phandle_args pd_args;
 	struct generic_pm_domain *pd;
-	int pstate;
 	int ret;

 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
@@ -2766,18 +2993,21 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;

-	/* Set the default performance state */
-	pstate = of_get_required_opp_performance_state(dev->of_node, index);
-	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
-		ret = pstate;
-		goto err;
-	} else if (pstate > 0) {
-		ret = dev_pm_genpd_set_performance_state(dev, pstate);
+	/*
+	 * For a single PM domain the index of the required OPP must be zero, so
+	 * let's try to assign a required dev in that case. In the multiple PM
+	 * domains case, we need platform code to specify the index.
+	 */
+	if (num_domains == 1) {
+		ret = genpd_set_required_opp_dev(dev, base_dev);
 		if (ret)
 			goto err;
-		dev_gpd_data(dev)->default_pstate = pstate;
 	}

+	ret = genpd_set_required_opp(dev, index);
+	if (ret)
+		goto err;
+
 	if (power_on) {
 		genpd_lock(pd);
 		ret = genpd_power_on(pd, 0);
@@ -2798,8 +3028,6 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
 	return 1;

 err:
-	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
-		pd->name, ret);
 	genpd_remove_device(pd, dev);
 	return ret;
 }
@@ -2830,7 +3058,7 @@ int genpd_dev_pm_attach(struct device *dev)
 				       "#power-domain-cells") != 1)
 		return 0;

-	return __genpd_dev_pm_attach(dev, dev, 0, true);
+	return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);

@@ -2883,7 +3111,7 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 	}

 	/* Try to attach the device to the PM domain at the specified index. */
-	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
+	ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
 	if (ret < 1) {
 		device_unregister(virt_dev);
 		return ret ? ERR_PTR(ret) : NULL;
 	}
@@ -2952,6 +3180,8 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
 	if (!err)
 		genpd_state->residency_ns = 1000LL * residency;

+	of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
+
 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
 	genpd_state->fwnode = &state_node->fwnode;
@@ -3075,7 +3305,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
 	else
 		WARN_ON(1);

-	seq_printf(s, "%-25s ", p);
+	seq_printf(s, "%-26s ", p);
 }

 static void perf_status_str(struct seq_file *s, struct device *dev)
@@ -3083,7 +3313,17 @@ static void perf_status_str(struct seq_file *s, struct device *dev)
 	struct generic_pm_domain_data *gpd_data;

 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
-	seq_put_decimal_ull(s, "", gpd_data->performance_state);
+
+	seq_printf(s, "%-10u ", gpd_data->performance_state);
+}
+
+static void mode_status_str(struct seq_file *s, struct device *dev)
+{
+	struct generic_pm_domain_data *gpd_data;
+
+	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+
+	seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
 }

 static int genpd_summary_one(struct seq_file *s,
@@ -3094,7 +3334,6 @@ static int genpd_summary_one(struct seq_file *s,
 		[GENPD_STATE_OFF] = "off"
 	};
 	struct pm_domain_data *pm_data;
-	const char *kobj_path;
 	struct gpd_link *link;
 	char state[16];
 	int ret;
@@ -3111,12 +3350,12 @@ static int genpd_summary_one(struct seq_file *s,
 	else
 		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
-	seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
+	seq_printf(s, "%-30s %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state);

 	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
-	 * Also genpd->name is immutable.
+	 * Also the device name is immutable.
	 */
 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
 		if (list_is_first(&link->parent_node, &genpd->parent_links))
@@ -3127,16 +3366,10 @@ static int genpd_summary_one(struct seq_file *s,

 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
-		kobj_path = kobject_get_path(&pm_data->dev->kobj,
-				genpd_is_irq_safe(genpd) ?
-				GFP_ATOMIC : GFP_KERNEL);
-		if (kobj_path == NULL)
-			continue;
-
-		seq_printf(s, "\n %-50s ", kobj_path);
+		seq_printf(s, "\n %-30s ", dev_name(pm_data->dev));
 		rtpm_status_str(s, pm_data->dev);
 		perf_status_str(s, pm_data->dev);
-		kfree(kobj_path);
+		mode_status_str(s, pm_data->dev);
 	}

 	seq_puts(s, "\n");
@@ -3151,9 +3384,9 @@ static int summary_show(struct seq_file *s, void *data)
 	struct generic_pm_domain *genpd;
 	int ret = 0;

-	seq_puts(s, "domain status children performance\n");
-	seq_puts(s, " /device runtime status\n");
-	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
+	seq_puts(s, "domain status children performance\n");
+	seq_puts(s, " /device runtime status managed by\n");
+	seq_puts(s, "------------------------------------------------------------------------------\n");

 	ret = mutex_lock_interruptible(&gpd_list_lock);
 	if (ret)
@@ -3227,7 +3460,10 @@ static int idle_states_show(struct seq_file *s, void *data)
 	seq_puts(s, "State Time Spent(ms) Usage Rejected\n");

 	for (i = 0; i < genpd->state_count; i++) {
-		idle_time += genpd->states[i].idle_time;
+		struct genpd_power_state *state = &genpd->states[i];
+		char state_name[15];
+
+		idle_time += state->idle_time;

 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
 			now = ktime_get_mono_fast_ns();
@@ -3237,9 +3473,13 @@ static int idle_states_show(struct seq_file *s, void *data)
 			}
 		}

+		if (!state->name)
+			snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
+
 		do_div(idle_time, NSEC_PER_MSEC);
-		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
-			genpd->states[i].usage, genpd->states[i].rejected);
+		seq_printf(s, "%-14s %-14llu %-14llu %llu\n",
+			   state->name ?: state_name, idle_time,
+			   state->usage, state->rejected);
 	}

 	genpd_unlock(genpd);
@@ -3305,23 +3545,14 @@ static int devices_show(struct seq_file *s, void *data)
 {
 	struct generic_pm_domain *genpd = s->private;
 	struct pm_domain_data *pm_data;
-	const char *kobj_path;
 	int ret = 0;

 	ret = genpd_lock_interruptible(genpd);
 	if (ret)
 		return -ERESTARTSYS;

-	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
-		kobj_path = kobject_get_path(&pm_data->dev->kobj,
-				genpd_is_irq_safe(genpd) ?
-				GFP_ATOMIC : GFP_KERNEL);
-		if (kobj_path == NULL)
-			continue;
-
-		seq_printf(s, "%s\n", kobj_path);
-		kfree(kobj_path);
-	}
+	list_for_each_entry(pm_data, &genpd->dev_list, list_node)
+		seq_printf(s, "%s\n", dev_name(pm_data->dev));

 	genpd_unlock(genpd);
 	return ret;
@@ -3356,7 +3587,7 @@ static void genpd_debug_add(struct generic_pm_domain *genpd)
 	if (!genpd_debugfs_dir)
 		return;

-	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
+	d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);

 	debugfs_create_file("current_state", 0444, d, genpd,
			    &status_fops);
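Note on the new consumer-facing API added above: dev_pm_genpd_set_hwmode() and dev_pm_genpd_get_hwmode() let a consumer driver switch its PM domain between SW-controlled and HW-controlled power at runtime. The sketch below shows one way a driver might call them; it is an illustrative example only and not part of this diff, and the example_probe() function and the surrounding driver are hypothetical.

/*
 * Illustrative sketch (not from the patch): request HW-controlled power for
 * the device's PM domain and report which mode is in effect.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Ask the attached genpd to let HW signals control the power. */
	ret = dev_pm_genpd_set_hwmode(&pdev->dev, true);
	if (ret == -EOPNOTSUPP)
		dev_info(&pdev->dev, "PM domain has no HW mode support\n");
	else if (ret)
		return ret;

	/* The getter returns the SW-cached hw_mode flag for this device. */
	dev_dbg(&pdev->dev, "power managed by %s\n",
		dev_pm_genpd_get_hwmode(&pdev->dev) ? "HW" : "SW");

	return 0;
}

Both helpers assume the device is already attached to a genpd and stays attached for the duration of the call, as the kernel-doc in the hunk above states; a domain whose provider implements no set_hwmode_dev callback returns -EOPNOTSUPP.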