Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/auxiliary.c              |   4
-rw-r--r--  drivers/base/cacheinfo.c              |  50
-rw-r--r--  drivers/base/core.c                   |  81
-rw-r--r--  drivers/base/cpu.c                    |   3
-rw-r--r--  drivers/base/dd.c                     |   2
-rw-r--r--  drivers/base/devcoredump.c            |   2
-rw-r--r--  drivers/base/faux.c                   |   3
-rw-r--r--  drivers/base/firmware_loader/main.c   |  31
-rw-r--r--  drivers/base/firmware_loader/sysfs.c  |   6
-rw-r--r--  drivers/base/node.c                   |   2
-rw-r--r--  drivers/base/platform.c               |   9
-rw-r--r--  drivers/base/power/common.c           |   9
-rw-r--r--  drivers/base/power/main.c             | 199
-rw-r--r--  drivers/base/power/runtime.c          | 160
-rw-r--r--  drivers/base/power/wakeup.c           |   2
-rw-r--r--  drivers/base/property.c               |  39
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  |  10
-rw-r--r--  drivers/base/regmap/regmap-kunit.c    |   2
-rw-r--r--  drivers/base/regmap/regmap.c          |   2
-rw-r--r--  drivers/base/topology.c               |   2
20 files changed, 405 insertions(+), 213 deletions(-)
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index dba7c8e13a53..12ffdd843756 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -217,7 +217,7 @@ static int auxiliary_bus_probe(struct device *dev)
 	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
 	int ret;
 
-	ret = dev_pm_domain_attach(dev, true);
+	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
 	if (ret) {
 		dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
 		return ret;
@@ -399,6 +399,7 @@ static void auxiliary_device_release(struct device *dev)
 {
 	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
 
+	of_node_put(dev->of_node);
 	kfree(auxdev);
 }
 
@@ -435,6 +436,7 @@ struct auxiliary_device *auxiliary_device_create(struct device *dev,
 
 	ret = auxiliary_device_init(auxdev);
 	if (ret) {
+		of_node_put(auxdev->dev.of_node);
 		kfree(auxdev);
 		return NULL;
 	}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf0d455209d7..613410705a47 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,6 +8,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/acpi.h>
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/cacheinfo.h>
 #include <linux/compiler.h>
@@ -183,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
 	return of_property_read_bool(np, "cache-unified");
 }
 
+static bool match_cache_node(struct device_node *cpu,
+			     const struct device_node *cache_node)
+{
+	struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+	while (cache) {
+		if (cache == cache_node) {
+			of_node_put(cache);
+			return true;
+		}
+
+		prev = cache;
+		cache = of_find_next_cache_node(cache);
+		of_node_put(prev);
+	}
+
+	return false;
+}
+
+#ifndef arch_compact_of_hwid
+#define arch_compact_of_hwid(_x)	(_x)
+#endif
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+			    struct device_node *cache_node)
+{
+	struct device_node *cpu;
+	u32 min_id = ~0;
+
+	for_each_of_cpu_node(cpu) {
+		u64 id = of_get_cpu_hwid(cpu, 0);
+
+		id = arch_compact_of_hwid(id);
+		if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+			of_node_put(cpu);
+			return;
+		}
+
+		if (match_cache_node(cpu, cache_node))
+			min_id = min(min_id, id);
+	}
+
+	if (min_id != ~0) {
+		this_leaf->id = min_id;
+		this_leaf->attributes |= CACHE_ID;
+	}
+}
+
 static void cache_of_set_props(struct cacheinfo *this_leaf,
 			       struct device_node *np)
 {
@@ -198,6 +247,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
 	cache_get_line_size(this_leaf, np);
 	cache_nr_sets(this_leaf, np);
 	cache_associativity(this_leaf);
+	cache_of_set_id(this_leaf, np);
 }
 
 static int cache_setup_of_node(unsigned int cpu)
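The cacheinfo hunk above derives a cache ID from the lowest hardware ID among the CPUs sharing the cache, and gives up if any compacted ID does not fit in 32 bits. As a purely hypothetical illustration (not from this patch), an architecture could override the identity default of arch_compact_of_hwid() along these lines; my_arch_compact_hwid() and its field layout are made up for the sketch:

static inline u64 my_arch_compact_hwid(u64 hwid)
{
	/* Assumed layout: keep two 8-bit affinity fields, drop the rest,
	 * so the FIELD_GET(GENMASK_ULL(63, 32), id) check above passes. */
	return ((hwid >> 32) & 0xff) << 8 | (hwid & 0xff);
}
#define arch_compact_of_hwid(hwid)	my_arch_compact_hwid(hwid)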
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cbc0099d8ef2..d22d6b23e758 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -460,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev,
 	struct device_link *link = to_devlink(dev);
 	const char *output;
 
-	if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+	if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
 		output = "supplier unbind";
-	else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+	else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
 		output = "consumer unbind";
 	else
 		output = "never";
@@ -476,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev,
 {
 	struct device_link *link = to_devlink(dev);
 
-	return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+	return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
 }
 static DEVICE_ATTR_RO(runtime_pm);
 
@@ -485,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev,
 {
 	struct device_link *link = to_devlink(dev);
 
-	return sysfs_emit(buf, "%d\n",
-			  !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+	return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
 }
 static DEVICE_ATTR_RO(sync_state_only);
 
@@ -792,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer,
 		if (link->consumer != consumer)
 			continue;
 
-		if (link->flags & DL_FLAG_INFERRED &&
+		if (device_link_test(link, DL_FLAG_INFERRED) &&
 		    !(flags & DL_FLAG_INFERRED))
 			link->flags &= ~DL_FLAG_INFERRED;
 
 		if (flags & DL_FLAG_PM_RUNTIME) {
-			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+			if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
 				pm_runtime_new_link(consumer);
 				link->flags |= DL_FLAG_PM_RUNTIME;
 			}
@@ -807,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer,
 		if (flags & DL_FLAG_STATELESS) {
 			kref_get(&link->kref);
-			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
-			    !(link->flags & DL_FLAG_STATELESS)) {
+			if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
+			    !device_link_test(link, DL_FLAG_STATELESS)) {
 				link->flags |= DL_FLAG_STATELESS;
 				goto reorder;
 			} else {
@@ -823,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer,
 		 * update the existing link to stay around longer.
 		 */
 		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
-			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+			if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
 				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
 				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
 			}
@@ -831,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer,
 			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
 					 DL_FLAG_AUTOREMOVE_SUPPLIER);
 		}
-		if (!(link->flags & DL_FLAG_MANAGED)) {
+		if (!device_link_test(link, DL_FLAG_MANAGED)) {
 			kref_get(&link->kref);
 			link->flags |= DL_FLAG_MANAGED;
 			device_link_init_status(link, consumer, supplier);
 		}
-		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+		if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
 		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
 			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
 			goto reorder;
@@ -940,7 +939,7 @@ static void __device_link_del(struct kref *kref)
 
 static void device_link_put_kref(struct device_link *link)
 {
-	if (link->flags & DL_FLAG_STATELESS)
+	if (device_link_test(link, DL_FLAG_STATELESS))
 		kref_put(&link->kref, __device_link_del);
 	else if (!device_is_registered(link->consumer))
 		__device_link_del(&link->kref);
@@ -1004,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev)
 		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
 			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 		} else {
-			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+			WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
 			WRITE_ONCE(link->status, DL_STATE_DORMANT);
 		}
 	}
@@ -1072,14 +1071,14 @@ int device_links_check_suppliers(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		if (link->status != DL_STATE_AVAILABLE &&
-		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+		    !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
 
 			if (dev_is_best_effort(dev) &&
-			    link->flags & DL_FLAG_INFERRED &&
+			    device_link_test(link, DL_FLAG_INFERRED) &&
 			    !link->supplier->can_match) {
 				ret = -EAGAIN;
 				continue;
@@ -1128,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev,
 		return;
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 		if (link->status != DL_STATE_ACTIVE)
 			return;
@@ -1268,7 +1267,7 @@ void device_links_force_bind(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		if (link->status != DL_STATE_AVAILABLE) {
@@ -1329,7 +1328,7 @@ void device_links_driver_bound(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		/*
@@ -1345,7 +1344,7 @@ void device_links_driver_bound(struct device *dev)
 			WARN_ON(link->status != DL_STATE_DORMANT);
 		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 
-		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+		if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
 			driver_deferred_probe_add(link->consumer);
 	}
 
@@ -1357,11 +1356,11 @@ void device_links_driver_bound(struct device *dev)
 	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
 		struct device *supplier;
 
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		supplier = link->supplier;
-		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+		if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
 			/*
 			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
 			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
@@ -1369,7 +1368,7 @@ void device_links_driver_bound(struct device *dev)
 			 */
 			device_link_drop_managed(link);
 		} else if (dev_is_best_effort(dev) &&
-			   link->flags & DL_FLAG_INFERRED &&
+			   device_link_test(link, DL_FLAG_INFERRED) &&
 			   link->status != DL_STATE_CONSUMER_PROBE &&
 			   !link->supplier->can_match) {
 			/*
@@ -1421,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev)
 	struct device_link *link, *ln;
 
 	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
-		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+		if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
 			device_link_drop_managed(link);
 			continue;
 		}
@@ -1436,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev)
 		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
 			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 		} else {
-			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+			WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
 			WRITE_ONCE(link->status, DL_STATE_DORMANT);
 		}
 	}
@@ -1461,7 +1460,7 @@ void device_links_no_driver(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		/*
@@ -1498,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
-		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+		WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
 		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
 
 		/*
@@ -1510,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev)
 		 * has moved to DL_STATE_SUPPLIER_UNBIND.
 		 */
 		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
-		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+		    device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
 			device_link_drop_managed(link);
 
 		WRITE_ONCE(link->status, DL_STATE_DORMANT);
@@ -1544,7 +1543,7 @@ bool device_links_busy(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		if (link->status == DL_STATE_CONSUMER_PROBE
@@ -1586,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev)
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
 		enum device_link_state status;
 
-		if (!(link->flags & DL_FLAG_MANAGED) ||
-		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
+		if (!device_link_test(link, DL_FLAG_MANAGED) ||
+		    device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
 			continue;
 
 		status = link->status;
@@ -1743,7 +1742,7 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
 
 static void fw_devlink_relax_link(struct device_link *link)
 {
-	if (!(link->flags & DL_FLAG_INFERRED))
+	if (!device_link_test(link, DL_FLAG_INFERRED))
 		return;
 
 	if (device_link_flag_is_sync_state_only(link->flags))
@@ -1779,7 +1778,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
 	struct device_link *link = to_devlink(dev);
 	struct device *sup = link->supplier;
 
-	if (!(link->flags & DL_FLAG_MANAGED) ||
+	if (!device_link_test(link, DL_FLAG_MANAGED) ||
 	    link->status == DL_STATE_ACTIVE || sup->state_synced ||
 	    !dev_has_sync_state(sup))
 		return 0;
@@ -1881,8 +1880,6 @@ static void fw_devlink_unblock_consumers(struct device *dev)
 	device_links_write_unlock();
 }
 
-#define get_dev_from_fwnode(fwnode)	get_device((fwnode)->dev)
-
 static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
 {
 	struct device *dev;
@@ -2063,7 +2060,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
 		 * such due to a cycle.
 		 */
 		if (device_link_flag_is_sync_state_only(dev_link->flags) &&
-		    !(dev_link->flags & DL_FLAG_CYCLE))
+		    !device_link_test(dev_link, DL_FLAG_CYCLE))
 			continue;
 
 		if (__fw_devlink_relax_cycles(con_handle,
@@ -5281,6 +5278,12 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
 }
 EXPORT_SYMBOL_GPL(device_set_node);
 
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
+{
+	return get_device((fwnode)->dev);
+}
+EXPORT_SYMBOL_GPL(get_dev_from_fwnode);
+
 int device_match_name(struct device *dev, const void *name)
 {
 	return sysfs_streq(dev_name(dev), name);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 7779ab0ca7ce..efc575a00edd 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -602,6 +602,7 @@ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
 CPU_SHOW_VULN_FALLBACK(ghostwrite);
 CPU_SHOW_VULN_FALLBACK(old_microcode);
 CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+CPU_SHOW_VULN_FALLBACK(tsa);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -620,6 +621,7 @@ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling
 static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
 static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -639,6 +641,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_ghostwrite.attr,
 	&dev_attr_old_microcode.attr,
 	&dev_attr_indirect_target_selection.attr,
+	&dev_attr_tsa.attr,
 	NULL
 };
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b526e0e0f52d..13ab98e033ea 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -25,6 +25,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/async.h>
+#include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pinctrl/devinfo.h>
 #include <linux/slab.h>
@@ -552,6 +553,7 @@ static void device_unbind_cleanup(struct device *dev)
 	dev->dma_range_map = NULL;
 	device_set_driver(dev, NULL);
 	dev_set_drvdata(dev, NULL);
+	dev_pm_domain_detach(dev, dev->power.detach_power_off);
 	if (dev->pm_domain && dev->pm_domain->dismiss)
 		dev->pm_domain->dismiss(dev);
 	pm_runtime_reinit(dev);
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 03a39c417dc4..37faf6156d7c 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -140,7 +140,7 @@ static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
 };
 
 static const struct attribute_group devcd_dev_group = {
-	.bin_attrs_new = devcd_dev_bin_attrs,
+	.bin_attrs = devcd_dev_bin_attrs,
 };
 
 static const struct attribute_group *devcd_dev_groups[] = {
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 9054d346bd7f..f5fbda0a9a44 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -86,6 +86,7 @@ static struct device_driver faux_driver = {
 	.name			= "faux_driver",
 	.bus			= &faux_bus_type,
 	.probe_type		= PROBE_FORCE_SYNCHRONOUS,
+	.suppress_bind_attrs	= true,
 };
 
 static void faux_device_release(struct device *dev)
@@ -169,7 +170,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
 	 * successful is almost impossible to determine by the caller.
 	 */
 	if (!dev->driver) {
-		dev_err(dev, "probe did not succeed, tearing down the device\n");
+		dev_dbg(dev, "probe did not succeed, tearing down the device\n");
 		faux_device_destroy(faux_dev);
 		faux_dev = NULL;
 	}
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 44486b2c7172..6942c62fa59d 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -822,26 +822,6 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
 {}
 #endif
 
-/*
- * Reject firmware file names with ".." path components.
- * There are drivers that construct firmware file names from device-supplied
- * strings, and we don't want some device to be able to tell us "I would like to
- * be sent my firmware from ../../../etc/shadow, please".
- *
- * Search for ".." surrounded by either '/' or start/end of string.
- *
- * This intentionally only looks at the firmware name, not at the firmware base
- * directory or at symlink contents.
- */
-static bool name_contains_dotdot(const char *name)
-{
-	size_t name_len = strlen(name);
-
-	return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
-	       strstr(name, "/../") != NULL ||
-	       (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
-}
-
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
@@ -862,6 +842,17 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		goto out;
 	}
 
+	/*
+	 * Reject firmware file names with ".." path components.
+	 * There are drivers that construct firmware file names from
+	 * device-supplied strings, and we don't want some device to be
+	 * able to tell us "I would like to be sent my firmware from
+	 * ../../../etc/shadow, please".
+	 *
+	 * This intentionally only looks at the firmware name, not at
+	 * the firmware base directory or at symlink contents.
+	 */
 	if (name_contains_dotdot(name)) {
 		dev_warn(device,
 			 "Firmware load for '%s' refused, path contains '..' component\n",
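For reference, the relocated check quoted above rejects exactly the names in which ".." forms a complete path component. A sketch of its behavior, derived from the removed implementation, not part of the patch itself:

name_contains_dotdot("..");           /* true:  whole name is ".."        */
name_contains_dotdot("../fw.bin");    /* true:  leading ".." component    */
name_contains_dotdot("a/../fw.bin");  /* true:  embedded ".." component   */
name_contains_dotdot("vendor/..");    /* true:  trailing ".." component   */
name_contains_dotdot("fw..bin");      /* false: ".." is not a full component */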
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index d254ceb56d84..add0b9b75edd 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -359,8 +359,8 @@ out:
 static const struct bin_attribute firmware_attr_data = {
 	.attr = { .name = "data", .mode = 0644 },
 	.size = 0,
-	.read_new = firmware_data_read,
-	.write_new = firmware_data_write,
+	.read = firmware_data_read,
+	.write = firmware_data_write,
 };
 
 static struct attribute *fw_dev_attrs[] = {
@@ -381,7 +381,7 @@ static const struct bin_attribute *const fw_dev_bin_attrs[] = {
 
 static const struct attribute_group fw_dev_attr_group = {
 	.attrs = fw_dev_attrs,
-	.bin_attrs_new = fw_dev_bin_attrs,
+	.bin_attrs = fw_dev_bin_attrs,
 #ifdef CONFIG_FW_UPLOAD
 	.is_visible = fw_upload_is_visible,
 #endif
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c19094481630..9328b81c2f47 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -597,7 +597,7 @@ static const struct bin_attribute *node_dev_bin_attrs[] = {
 
 static const struct attribute_group node_dev_group = {
 	.attrs = node_dev_attrs,
-	.bin_attrs_new = node_dev_bin_attrs,
+	.bin_attrs = node_dev_bin_attrs,
 };
 
 static const struct attribute_group *node_dev_groups[] = {
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 075ec1d1b73a..09450349cf32 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1396,15 +1396,13 @@ static int platform_probe(struct device *_dev)
 	if (ret < 0)
 		return ret;
 
-	ret = dev_pm_domain_attach(_dev, true);
+	ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON |
+					 PD_FLAG_DETACH_POWER_OFF);
 	if (ret)
 		goto out;
 
-	if (drv->probe) {
+	if (drv->probe)
 		ret = drv->probe(dev);
-		if (ret)
-			dev_pm_domain_detach(_dev, true);
-	}
 
 out:
 	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -1422,7 +1420,6 @@ static void platform_remove(struct device *_dev)
 
 	if (drv->remove)
 		drv->remove(dev);
-	dev_pm_domain_detach(_dev, true);
 }
 
 static void platform_shutdown(struct device *_dev)
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 781968a128ff..6ecf9ce4a4e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
 /**
  * dev_pm_domain_attach - Attach a device to its PM domain.
  * @dev: Device to attach.
- * @power_on: Used to indicate whether we should power on the device.
+ * @flags: indicate whether we should power on/off the device on attach/detach
  *
  * The @dev may only be attached to a single PM domain. By iterating through
  * the available alternatives we try to find a valid PM domain for the device.
@@ -100,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
  * Returns 0 on successfully attached PM domain, or when it is found that the
  * device doesn't need a PM domain, else a negative error code.
  */
-int dev_pm_domain_attach(struct device *dev, bool power_on)
+int dev_pm_domain_attach(struct device *dev, u32 flags)
 {
 	int ret;
 
 	if (dev->pm_domain)
 		return 0;
 
-	ret = acpi_dev_pm_attach(dev, power_on);
+	ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON));
 	if (!ret)
 		ret = genpd_dev_pm_attach(dev);
 
+	if (dev->pm_domain)
+		dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF);
+
 	return ret < 0 ? ret : 0;
 }
 EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
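With the flags-based API above, a bus that wants its devices powered on across probe and powered off again on detach passes both flags at attach time; the driver core then handles the detach-time power-off (which is why the platform.c and dd.c hunks drop their explicit dev_pm_domain_detach() calls). A sketch under the assumption of a hypothetical my_bus_probe():

static int my_bus_probe(struct device *dev)
{
	int ret;

	/* Power on for probe; power off again automatically on detach. */
	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
					PD_FLAG_DETACH_POWER_OFF);
	if (ret)
		return ret;

	return 0;	/* ... continue with the driver's probe ... */
}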
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 19fd55b8ac77..bb382a70d260 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -513,7 +513,7 @@ struct dpm_watchdog {
  */
 static void dpm_watchdog_handler(struct timer_list *t)
 {
-	struct dpm_watchdog *wd = from_timer(wd, t, timer);
+	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
 	struct timer_list *timer = &wd->timer;
 	unsigned int time_left;
 
@@ -638,16 +638,36 @@ static int dpm_async_with_cleanup(struct device *dev, void *fn)
 static void dpm_async_resume_children(struct device *dev, async_func_t func)
 {
 	/*
+	 * Prevent racing with dpm_clear_async_state() during initial list
+	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+	 * dpm_resume().
+	 */
+	guard(mutex)(&dpm_list_mtx);
+
+	/*
 	 * Start processing "async" children of the device unless it's been
 	 * started already for them.
-	 *
-	 * This could have been done for the device's "async" consumers too, but
-	 * they either need to wait for their parents or the processing has
-	 * already started for them after their parents were processed.
 	 */
 	device_for_each_child(dev, func, dpm_async_with_cleanup);
 }
 
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+	struct device_link *link;
+	int idx;
+
+	dpm_async_resume_children(dev, func);
+
+	idx = device_links_read_lock();
+
+	/* Start processing the device's "async" consumers. */
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_async_with_cleanup(link->consumer, func);
+
+	device_links_read_unlock(idx);
+}
+
 static void dpm_clear_async_state(struct device *dev)
 {
 	reinit_completion(&dev->power.completion);
@@ -656,7 +676,14 @@ static void dpm_clear_async_state(struct device *dev)
 
 static bool dpm_root_device(struct device *dev)
 {
-	return !dev->parent;
+	lockdep_assert_held(&dpm_list_mtx);
+
+	/*
+	 * Since this function is required to run under dpm_list_mtx, the
+	 * list_empty() below will only return true if the device's list of
+	 * consumers is actually empty before calling it.
+	 */
+	return !dev->parent && list_empty(&dev->links.suppliers);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie);
@@ -740,12 +767,12 @@ Out:
 	TRACE_RESUME(error);
 
 	if (error) {
-		async_error = error;
+		WRITE_ONCE(async_error, error);
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_noirq);
+	dpm_async_resume_subordinate(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -797,7 +824,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, 0, "noirq");
-	if (async_error)
+	if (READ_ONCE(async_error))
 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 
 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
@@ -883,12 +910,12 @@ Out:
 	complete_all(&dev->power.completion);
 
 	if (error) {
-		async_error = error;
+		WRITE_ONCE(async_error, error);
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async early" : " early", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_early);
+	dpm_async_resume_subordinate(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -944,7 +971,7 @@ void dpm_resume_early(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, 0, "early");
-	if (async_error)
+	if (READ_ONCE(async_error))
 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 
 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
@@ -985,6 +1012,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 	if (!dev->power.is_suspended)
 		goto Complete;
 
+	dev->power.is_suspended = false;
+
 	if (dev->power.direct_complete) {
 		/*
 		 * Allow new children to be added under the device after this
@@ -1047,7 +1076,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 
  End:
 	error = dpm_run_callback(callback, dev, state, info);
-	dev->power.is_suspended = false;
 
 	device_unlock(dev);
 	dpm_watchdog_clear(&wd);
@@ -1058,12 +1086,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 	TRACE_RESUME(error);
 
 	if (error) {
-		async_error = error;
+		WRITE_ONCE(async_error, error);
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async" : "", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume);
+	dpm_async_resume_subordinate(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
@@ -1087,7 +1115,6 @@ void dpm_resume(pm_message_t state)
 	ktime_t starttime = ktime_get();
 
 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
-	might_sleep();
 
 	pm_transition = state;
 	async_error = 0;
@@ -1123,7 +1150,7 @@ void dpm_resume(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, 0, NULL);
-	if (async_error)
+	if (READ_ONCE(async_error))
 		dpm_save_failed_step(SUSPEND_RESUME);
 
 	cpufreq_resume();
@@ -1190,7 +1217,6 @@ void dpm_complete(pm_message_t state)
 	struct list_head list;
 
 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
-	might_sleep();
 
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
@@ -1229,6 +1255,7 @@ void dpm_complete(pm_message_t state)
 void dpm_resume_end(pm_message_t state)
 {
 	dpm_resume(state);
+	pm_restore_gfp_mask();
 	dpm_complete(state);
 }
 EXPORT_SYMBOL_GPL(dpm_resume_end);
@@ -1249,10 +1276,15 @@ static bool dpm_leaf_device(struct device *dev)
 			return false;
 	}
 
-	return true;
+	/*
+	 * Since this function is required to run under dpm_list_mtx, the
+	 * list_empty() below will only return true if the device's list of
+	 * consumers is actually empty before calling it.
+	 */
+	return list_empty(&dev->links.consumers);
 }
 
-static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
 {
 	guard(mutex)(&dpm_list_mtx);
 
@@ -1264,11 +1296,47 @@ static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
 	 * deleted before it.
 	 */
 	if (!device_pm_initialized(dev))
-		return;
+		return false;
 
 	/* Start processing the device's parent if it is "async". */
 	if (dev->parent)
 		dpm_async_with_cleanup(dev->parent, func);
+
+	return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+	struct device_link *link;
+	int idx;
+
+	if (!dpm_async_suspend_parent(dev, func))
+		return;
+
+	idx = device_links_read_lock();
+
+	/* Start processing the device's "async" suppliers. */
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_async_with_cleanup(link->supplier, func);
+
+	device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+	struct device *dev;
+
+	guard(mutex)(&async_wip_mtx);
+
+	list_for_each_entry_reverse(dev, device_list, power.entry) {
+		/*
+		 * In case the device is being waited for and async processing
+		 * has not started for it yet, let the waiters make progress.
+		 */
+		if (!dev->power.work_in_progress)
+			complete_all(&dev->power.completion);
+	}
 }
 
 /**
@@ -1319,7 +1387,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie);
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
-static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -1330,7 +1398,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
 
 	dpm_wait_for_subordinate(dev, async);
 
-	if (async_error)
+	if (READ_ONCE(async_error))
 		goto Complete;
 
 	if (dev->power.syscore || dev->power.direct_complete)
@@ -1363,7 +1431,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
 Run:
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
-		async_error = error;
+		WRITE_ONCE(async_error, error);
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 		goto Complete;
@@ -1389,12 +1457,10 @@ Complete:
 	complete_all(&dev->power.completion);
 	TRACE_SUSPEND(error);
 
-	if (error || async_error)
-		return error;
-
-	dpm_async_suspend_parent(dev, async_suspend_noirq);
+	if (error || READ_ONCE(async_error))
+		return;
 
-	return 0;
+	dpm_async_suspend_superior(dev, async_suspend_noirq);
 }
 
 static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1409,7 +1475,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	struct device *dev;
-	int error = 0;
+	int error;
 
 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
 
@@ -1440,18 +1506,19 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_noirq(dev, state, false);
+		device_suspend_noirq(dev, state, false);
 
 		put_device(dev);
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error) {
+		if (READ_ONCE(async_error)) {
+			dpm_async_suspend_complete_all(&dpm_late_early_list);
 			/*
 			 * Move all devices to the target list to resume them
 			 * properly.
 			 */
-			list_splice(&dpm_late_early_list, &dpm_noirq_list);
+			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
 			break;
 		}
 	}
@@ -1459,9 +1526,8 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
-	if (!error)
-		error = async_error;
 
+	error = READ_ONCE(async_error);
 	if (error)
 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
@@ -1516,7 +1582,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie);
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
-static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -1533,11 +1599,11 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
 
 	dpm_wait_for_subordinate(dev, async);
 
-	if (async_error)
+	if (READ_ONCE(async_error))
 		goto Complete;
 
 	if (pm_wakeup_pending()) {
-		async_error = -EBUSY;
+		WRITE_ONCE(async_error, -EBUSY);
 		goto Complete;
 	}
 
@@ -1571,7 +1637,7 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
 Run:
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
-		async_error = error;
+		WRITE_ONCE(async_error, error);
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async late" : " late", error);
 		goto Complete;
@@ -1585,12 +1651,10 @@ Complete:
 	TRACE_SUSPEND(error);
 	complete_all(&dev->power.completion);
 
-	if (error || async_error)
-		return error;
-
-	dpm_async_suspend_parent(dev, async_suspend_late);
+	if (error || READ_ONCE(async_error))
+		return;
 
-	return 0;
+	dpm_async_suspend_superior(dev, async_suspend_late);
 }
 
 static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1609,7 +1673,7 @@ int dpm_suspend_late(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	struct device *dev;
-	int error = 0;
+	int error;
 
 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
 
@@ -1642,18 +1706,19 @@ int dpm_suspend_late(pm_message_t state)
 
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_late(dev, state, false);
+		device_suspend_late(dev, state, false);
 
 		put_device(dev);
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error) {
+		if (READ_ONCE(async_error)) {
+			dpm_async_suspend_complete_all(&dpm_suspended_list);
 			/*
 			 * Move all devices to the target list to resume them
 			 * properly.
 			 */
-			list_splice(&dpm_suspended_list, &dpm_late_early_list);
+			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
 			break;
 		}
 	}
@@ -1661,9 +1726,8 @@ int dpm_suspend_late(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
-	if (!error)
-		error = async_error;
 
+	error = READ_ONCE(async_error);
 	if (error) {
 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
 		dpm_resume_early(resume_event(state));
@@ -1752,7 +1816,7 @@ static void async_suspend(void *data, async_cookie_t cookie);
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
-static int device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -1764,7 +1828,7 @@ static void device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	dpm_wait_for_subordinate(dev, async);
 
-	if (async_error) {
+	if (READ_ONCE(async_error)) {
 		dev->power.direct_complete = false;
 		goto Complete;
 	}
@@ -1784,7 +1848,7 @@ static void device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	if (pm_wakeup_pending()) {
 		dev->power.direct_complete = false;
-		async_error = -EBUSY;
+		WRITE_ONCE(async_error, -EBUSY);
 		goto Complete;
 	}
 
@@ -1868,7 +1932,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
 
 Complete:
 	if (error) {
-		async_error = error;
+		WRITE_ONCE(async_error, error);
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async" : "", error);
 	}
@@ -1876,12 +1940,10 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
 	complete_all(&dev->power.completion);
 	TRACE_SUSPEND(error);
 
-	if (error || async_error)
-		return error;
-
-	dpm_async_suspend_parent(dev, async_suspend);
+	if (error || READ_ONCE(async_error))
+		return;
 
-	return 0;
+	dpm_async_suspend_superior(dev, async_suspend);
 }
 
 static void async_suspend(void *data, async_cookie_t cookie)
@@ -1900,7 +1962,7 @@ int dpm_suspend(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	struct device *dev;
-	int error = 0;
+	int error;
 
 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
 	might_sleep();
@@ -1935,18 +1997,19 @@ int dpm_suspend(pm_message_t state)
 
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend(dev, state, false);
+		device_suspend(dev, state, false);
 
 		put_device(dev);
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error) {
+		if (READ_ONCE(async_error)) {
+			dpm_async_suspend_complete_all(&dpm_prepared_list);
 			/*
 			 * Move all devices to the target list to resume them
 			 * properly.
 			 */
-			list_splice(&dpm_prepared_list, &dpm_suspended_list);
+			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
 			break;
 		}
 	}
@@ -1954,9 +2017,8 @@ int dpm_suspend(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
-	if (!error)
-		error = async_error;
 
+	error = READ_ONCE(async_error);
 	if (error)
 		dpm_save_failed_step(SUSPEND_SUSPEND);
 
@@ -1990,7 +2052,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
 	idx = device_links_read_lock();
 
 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
 			continue;
 
 		if (!dev_pm_smart_suspend(link->supplier) &&
@@ -2101,7 +2163,6 @@ int dpm_prepare(pm_message_t state)
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
-	might_sleep();
 
 	/*
 	 * Give a chance for the known devices to complete their probes, before
@@ -2168,8 +2229,10 @@ int dpm_suspend_start(pm_message_t state)
 	error = dpm_prepare(state);
 	if (error)
 		dpm_save_failed_step(SUSPEND_PREPARE);
-	else
+	else {
+		pm_restrict_gfp_mask();
 		error = dpm_suspend(state);
+	}
 
 	dpm_show_time(starttime, state, error, "start");
 	return error;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index c55a7c70bc1a..3e84dc4122de 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -19,10 +19,24 @@
 
 typedef int (*pm_callback_t)(struct device *);
 
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+	return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+					       size_t cb_offset)
+{
+	if (dev->driver && dev->driver->pm)
+		return get_callback_ptr(dev->driver->pm, cb_offset);
+
+	return NULL;
+}
+
 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
 {
-	pm_callback_t cb;
 	const struct dev_pm_ops *ops;
+	pm_callback_t cb = NULL;
 
 	if (dev->pm_domain)
 		ops = &dev->pm_domain->ops;
@@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
 		ops = NULL;
 
 	if (ops)
-		cb = *(pm_callback_t *)((void *)ops + cb_offset);
-	else
-		cb = NULL;
+		cb = get_callback_ptr(ops, cb_offset);
 
-	if (!cb && dev->driver && dev->driver->pm)
-		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+	if (!cb)
+		cb = __rpm_get_driver_callback(dev, cb_offset);
 
 	return cb;
 }
@@ -290,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev)
 				device_links_read_lock_held()) {
 		int retval;
 
-		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
 			continue;
 
 		retval = pm_runtime_get_sync(link->supplier);
@@ -1191,10 +1203,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
  *
  * Return -EINVAL if runtime PM is disabled for @dev.
  *
- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
- * without changing the usage counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
  *
  * If @ign_usage_count is %true, this function can be used to prevent suspending
  * the device when its runtime PM status is %RPM_ACTIVE.
@@ -1216,7 +1230,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
 		retval = -EINVAL;
 	} else if (dev->power.runtime_status != RPM_ACTIVE) {
 		retval = 0;
-	} else if (ign_usage_count) {
+	} else if (ign_usage_count || (!dev->power.ignore_children &&
+		   atomic_read(&dev->power.child_count) > 0)) {
 		retval = 1;
 		atomic_inc(&dev->power.usage_count);
 	} else {
@@ -1249,10 +1264,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
  * @dev: Target device.
  *
  * Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
- * it returns 1. If the device is in a different state or its usage_count is 0,
- * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
- * in which case also the usage_count will remain unmodified.
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
+ * ignoring children and its active child count is nonzero. 1 is returned in
+ * this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0, or it is ignoring children, or its active child count is 0),
+ * 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * also the usage counter of @dev is not updated.
 */
 int pm_runtime_get_if_in_use(struct device *dev)
 {
@@ -1827,7 +1848,7 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
-	dev->power.needs_force_resume = 0;
+	dev->power.needs_force_resume = false;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
@@ -1854,6 +1875,11 @@ void pm_runtime_reinit(struct device *dev)
 				pm_runtime_put(dev->parent);
 		}
 	}
+	/*
+	 * Clear power.needs_force_resume in case it has been set by
+	 * pm_runtime_force_suspend() invoked from a driver remove callback.
+	 */
+	dev->power.needs_force_resume = false;
 }
 
 /**
@@ -1879,7 +1905,7 @@ void pm_runtime_get_suppliers(struct device *dev)
 
 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 				device_links_read_lock_held())
-		if (link->flags & DL_FLAG_PM_RUNTIME) {
+		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
 			link->supplier_preactivated = true;
 			pm_runtime_get_sync(link->supplier);
 		}
@@ -1933,7 +1959,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
 */
 void pm_runtime_drop_link(struct device_link *link)
 {
-	if (!(link->flags & DL_FLAG_PM_RUNTIME))
+	if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
 		return;
 
 	pm_runtime_drop_link_count(link->consumer);
@@ -1941,13 +1967,23 @@ void pm_runtime_drop_link(struct device_link *link)
 	pm_request_idle(link->supplier);
 }
 
-bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
 {
-	return atomic_read(&dev->power.usage_count) <= 1 &&
-		(atomic_read(&dev->power.child_count) == 0 ||
-		 dev->power.ignore_children);
+	/*
+	 * Setting power.strict_midlayer means that the middle layer
+	 * code does not want its runtime PM callbacks to be invoked via
+	 * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+	 * return a direct pointer to the driver callback in that case.
+	 */
+	if (dev_pm_strict_midlayer_is_set(dev))
+		return __rpm_get_driver_callback(dev, cb_offset);
+
+	return __rpm_get_callback(dev, cb_offset);
 }
 
+#define GET_CALLBACK(dev, callback) \
+		get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
 /**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
@@ -1964,10 +2000,6 @@ bool pm_runtime_need_not_resume(struct device *dev)
 * sure the device is put into low power state and it should only be used during
 * system-wide PM transitions to sleep states. It assumes that the analogous
 * pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
 */
 int pm_runtime_force_suspend(struct device *dev)
 {
@@ -1975,10 +2007,10 @@ int pm_runtime_force_suspend(struct device *dev)
 	int ret;
 
 	pm_runtime_disable(dev);
-	if (pm_runtime_status_suspended(dev))
+	if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
 		return 0;
 
-	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+	callback = GET_CALLBACK(dev, runtime_suspend);
 
 	dev_pm_enable_wake_irq_check(dev, true);
 	ret = callback ? callback(dev) : 0;
@@ -1990,15 +2022,16 @@ int pm_runtime_force_suspend(struct device *dev)
 	/*
 	 * If the device can stay in suspend after the system-wide transition
 	 * to the working state that will follow, drop the children counter of
-	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
-	 * function will be called again for it in the meantime.
+	 * its parent and the usage counters of its suppliers. Otherwise, set
+	 * power.needs_force_resume to let pm_runtime_force_resume() know that
+	 * the device needs to be taken care of and to prevent this function
+	 * from handling the device again in case the device is passed to it
+	 * once more subsequently.
 	 */
-	if (pm_runtime_need_not_resume(dev)) {
+	if (pm_runtime_need_not_resume(dev))
 		pm_runtime_set_suspended(dev);
-	} else {
-		__update_runtime_status(dev, RPM_SUSPENDED);
-		dev->power.needs_force_resume = 1;
-	}
+	else
+		dev->power.needs_force_resume = true;
 
 	return 0;
 
@@ -2009,33 +2042,37 @@ err:
 	dev_pm_disable_wake_irq_check(dev, false);
 	pm_runtime_enable(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
 
+#ifdef CONFIG_PM_SLEEP
+
 /**
  * pm_runtime_force_resume - Force a device into resume state if needed.
  * @dev: Device to resume.
  *
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
 *
- * Typically this function may be invoked from a system resume callback.
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
+ *
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
 */
 int pm_runtime_force_resume(struct device *dev)
 {
 	int (*callback)(struct device *);
 	int ret = 0;
 
-	if (!dev->power.needs_force_resume)
+	if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+	    pm_runtime_status_suspended(dev)))
 		goto out;
 
-	/*
-	 * The value of the parent's children counter is correct already, so
-	 * just update the status of the device.
-	 */
-	__update_runtime_status(dev, RPM_ACTIVE);
-
-	callback = RPM_GET_CALLBACK(dev, runtime_resume);
+	callback = GET_CALLBACK(dev, runtime_resume);
 
 	dev_pm_disable_wake_irq_check(dev, false);
 	ret = callback ? callback(dev) : 0;
@@ -2046,9 +2083,30 @@ int pm_runtime_force_resume(struct device *dev)
 	}
 
 	pm_runtime_mark_last_busy(dev);
+
 out:
-	dev->power.needs_force_resume = 0;
+	/*
+	 * The smart_suspend flag can be cleared here because it is not going
+	 * to be necessary until the next system-wide suspend transition that
+	 * will update it again.
+	 */
+	dev->power.smart_suspend = false;
+	/*
+	 * Also clear needs_force_resume to make this function skip devices that
+	 * have been seen by it once.
+	 */
+	dev->power.needs_force_resume = false;
+
 	pm_runtime_enable(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+	return atomic_read(&dev->power.usage_count) <= 1 &&
+		(atomic_read(&dev->power.child_count) == 0 ||
+		 dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index f7c96a3bf719..d1283ff1080b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -759,7 +759,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
 */
 static void pm_wakeup_timer_fn(struct timer_list *t)
 {
-	struct wakeup_source *ws = from_timer(ws, t, timer);
+	struct wakeup_source *ws = timer_container_of(ws, t, timer);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ws->lock, flags);
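timer_container_of() above is the successor of from_timer(); both recover the structure that embeds the timer_list handed to a timer callback. A self-contained sketch with an assumed my_data type:

#include <linux/timer.h>

struct my_data {		/* illustrative */
	struct timer_list timer;
	int pings;
};

static void my_timer_fn(struct timer_list *t)
{
	/* Map the expiring &my_data::timer back to the enclosing my_data. */
	struct my_data *md = timer_container_of(md, t, timer);

	md->pings++;
}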
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c1392743df9c..f626d5bbe806 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -928,22 +928,49 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
 EXPORT_SYMBOL_GPL(fwnode_device_is_available);
 
 /**
- * device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to count the child nodes for
+ * fwnode_get_child_node_count - return the number of child nodes for a given firmware node
+ * @fwnode: Pointer to the parent firmware node
  *
- * Return: the number of child nodes for a given device.
+ * Return: the number of child nodes for a given firmware node.
+ */
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
+{
+	struct fwnode_handle *child;
+	unsigned int count = 0;
+
+	fwnode_for_each_child_node(fwnode, child)
+		count++;
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
+
+/**
+ * fwnode_get_named_child_node_count - number of child nodes with given name
+ * @fwnode: Node which child nodes are counted.
+ * @name: String to match child node name against.
+ *
+ * Scan child nodes and count all the nodes with a specific name. Potential
+ * 'number' -ending after the 'at sign' for scanned names is ignored.
+ * E.g.::
+ *   fwnode_get_named_child_node_count(fwnode, "channel");
+ * would match all the nodes::
+ *   channel { }, channel@0 {}, channel@0xabba {}...
+ *
+ * Return: the number of child nodes with a matching name for a given device.
 */
-unsigned int device_get_child_node_count(const struct device *dev)
+unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode,
+					       const char *name)
 {
 	struct fwnode_handle *child;
 	unsigned int count = 0;
 
-	device_for_each_child_node(dev, child)
+	fwnode_for_each_named_child_node(fwnode, child, name)
 		count++;
 
 	return count;
 }
-EXPORT_SYMBOL_GPL(device_get_child_node_count);
+EXPORT_SYMBOL_GPL(fwnode_get_named_child_node_count);
 
 bool device_dma_supported(const struct device *dev)
 {
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index fb84cda92a75..c9b4c04b1cf6 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -470,10 +470,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
 	if (err)
 		return count;
 
-	err = debugfs_file_get(file->f_path.dentry);
-	if (err)
-		return err;
-
 	map->lock(map->lock_arg);
 
 	if (new_val && !map->cache_only) {
@@ -486,7 +482,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
 
 	map->cache_only = new_val;
 	map->unlock(map->lock_arg);
-	debugfs_file_put(file->f_path.dentry);
 
 	if (require_sync) {
 		err = regcache_sync(map);
@@ -517,10 +512,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
 	if (err)
 		return count;
 
-	err = debugfs_file_get(file->f_path.dentry);
-	if (err)
-		return err;
-
 	map->lock(map->lock_arg);
 
 	if (new_val && !map->cache_bypass) {
@@ -532,7 +523,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
 
 	map->cache_bypass = new_val;
 	map->unlock(map->lock_arg);
-	debugfs_file_put(file->f_path.dentry);
 
 	return count;
 }
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 64ea340950b6..95c5bf2a78ee 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -736,7 +736,7 @@ static void stride(struct kunit *test)
 	}
 }
 
-static struct regmap_range_cfg test_range = {
+static const struct regmap_range_cfg test_range = {
 	.selector_reg = 1,
 	.selector_mask = 0xff,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f2843f814675..1f3f782a04ba 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1173,6 +1173,8 @@ err_name:
 err_map:
 	kfree(map);
 err:
+	if (bus && bus->free_on_exit)
+		kfree(bus);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(__regmap_init);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8b42df05feff..c890e2a5b428 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -179,7 +179,7 @@ static umode_t topology_is_visible(struct kobject *kobj,
 
 static const struct attribute_group topology_attr_group = {
 	.attrs = default_attrs,
-	.bin_attrs_new = bin_attrs,
+	.bin_attrs = bin_attrs,
 	.is_visible = topology_is_visible,
 	.name = "topology"
 };
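A usage sketch for the counting helpers added in property.c above, assuming a device whose firmware node has channel@N children (the names are illustrative):

static unsigned int my_count_adc_channels(struct device *dev)
{
	/* Counts "channel", "channel@0", "channel@xyz", ... child nodes. */
	return fwnode_get_named_child_node_count(dev_fwnode(dev), "channel");
}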