Diffstat (limited to 'drivers/base/core.c')
-rw-r--r--  drivers/base/core.c | 1613
1 file changed, 1005 insertions, 608 deletions
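Before the patch body: much of the churn below comes from switching to the scope-based helpers pulled in via the new #include <linux/cleanup.h> — guard(mutex)(...) and scoped_guard(mutex, ...) replace open-coded mutex_lock()/mutex_unlock() pairs (e.g. in fwnode_link_add() and device_links_check_suppliers()), and __free(kfree) auto-frees the kasprintf() buffers in devlink_add_symlinks()/devlink_remove_symlinks(). These helpers are implemented on top of the compiler's cleanup attribute, which runs a handler whenever the annotated variable goes out of scope, on every exit path. The following is a minimal, stand-alone user-space sketch of that underlying idiom, not code from the patch: a pthread mutex stands in for the kernel mutex, and the names free_charp, unlock_mutex and make_label are purely illustrative.

/*
 * Stand-alone sketch of the cleanup-attribute idiom behind the kernel's
 * guard(mutex)(...) and __free(kfree) helpers used in the patch below.
 * All names here are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void free_charp(char **p)
{
	free(*p);			/* free(NULL) is a no-op */
}

static void unlock_mutex(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static pthread_mutex_t label_lock = PTHREAD_MUTEX_INITIALIZER;

static int make_label(const char *bus, const char *name)
{
	/* Held until the function returns, like guard(mutex)(&lock). */
	pthread_mutex_t *guard __attribute__((cleanup(unlock_mutex))) = &label_lock;
	pthread_mutex_lock(guard);

	/* Freed automatically on every return path, like __free(kfree). */
	char *buf __attribute__((cleanup(free_charp))) = malloc(64);

	if (!buf)
		return -1;		/* lock released by the cleanup handler */

	snprintf(buf, 64, "consumer:%s:%s", bus, name);
	printf("%s\n", buf);
	return 0;			/* buf freed, lock released */
}

int main(void)
{
	return make_label("pci", "0000:00:1f.3");
}

Built with cc -pthread, this prints "consumer:pci:0000:00:1f.3". The same pattern is what lets the patched functions drop their explicit unlock and kfree error paths, which is why several goto/out labels disappear in the hunks that follow.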
diff --git a/drivers/base/core.c b/drivers/base/core.c index a3e14143ec0c..40de2f51a1b1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -9,58 +9,49 @@ */ #include <linux/acpi.h> +#include <linux/blkdev.h> +#include <linux/cleanup.h> #include <linux/cpufreq.h> #include <linux/device.h> +#include <linux/dma-map-ops.h> /* for dma_default_coherent */ #include <linux/err.h> #include <linux/fwnode.h> #include <linux/init.h> +#include <linux/kdev_t.h> #include <linux/kstrtox.h> #include <linux/module.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/kdev_t.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/blkdev.h> -#include <linux/mutex.h> #include <linux/pm_runtime.h> -#include <linux/netdevice.h> -#include <linux/sched/signal.h> #include <linux/sched/mm.h> +#include <linux/sched/signal.h> +#include <linux/slab.h> +#include <linux/string_helpers.h> #include <linux/swiotlb.h> #include <linux/sysfs.h> -#include <linux/dma-map-ops.h> /* for dma_default_coherent */ #include "base.h" #include "physical_location.h" #include "power/power.h" -#ifdef CONFIG_SYSFS_DEPRECATED -#ifdef CONFIG_SYSFS_DEPRECATED_V2 -long sysfs_deprecated = 1; -#else -long sysfs_deprecated = 0; -#endif -static int __init sysfs_deprecated_setup(char *arg) -{ - return kstrtol(arg, 10, &sysfs_deprecated); -} -early_param("sysfs.deprecated", sysfs_deprecated_setup); -#endif - /* Device links support. */ static LIST_HEAD(deferred_sync); static unsigned int defer_sync_state_count = 1; static DEFINE_MUTEX(fwnode_link_lock); static bool fw_devlink_is_permissive(void); +static void __fw_devlink_link_to_consumers(struct device *dev); static bool fw_devlink_drv_reg_done; static bool fw_devlink_best_effort; +static struct workqueue_struct *device_link_wq; /** - * fwnode_link_add - Create a link between two fwnode_handles. + * __fwnode_link_add - Create a link between two fwnode_handles. * @con: Consumer end of the link. * @sup: Supplier end of the link. + * @flags: Link flags. * * Create a fwnode link between fwnode handles @con and @sup. The fwnode link * represents the detail that the firmware lists @sup fwnode as supplying a @@ -74,36 +65,41 @@ static bool fw_devlink_best_effort; * Attempts to create duplicate links between the same pair of fwnode handles * are ignored and there is no reference counting. 
*/ -int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup) +static int __fwnode_link_add(struct fwnode_handle *con, + struct fwnode_handle *sup, u8 flags) { struct fwnode_link *link; - int ret = 0; - - mutex_lock(&fwnode_link_lock); list_for_each_entry(link, &sup->consumers, s_hook) - if (link->consumer == con) - goto out; + if (link->consumer == con) { + link->flags |= flags; + return 0; + } link = kzalloc(sizeof(*link), GFP_KERNEL); - if (!link) { - ret = -ENOMEM; - goto out; - } + if (!link) + return -ENOMEM; link->supplier = sup; INIT_LIST_HEAD(&link->s_hook); link->consumer = con; INIT_LIST_HEAD(&link->c_hook); + link->flags = flags; list_add(&link->s_hook, &sup->consumers); list_add(&link->c_hook, &con->suppliers); - pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n", + pr_debug("%pfwf Linked as a fwnode consumer to %pfwf\n", con, sup); -out: - mutex_unlock(&fwnode_link_lock); - return ret; + return 0; +} + +int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup, + u8 flags) +{ + guard(mutex)(&fwnode_link_lock); + + return __fwnode_link_add(con, sup, flags); } /** @@ -114,7 +110,7 @@ out: */ static void __fwnode_link_del(struct fwnode_link *link) { - pr_debug("%pfwP Dropping the fwnode link to %pfwP\n", + pr_debug("%pfwf Dropping the fwnode link to %pfwf\n", link->consumer, link->supplier); list_del(&link->s_hook); list_del(&link->c_hook); @@ -122,6 +118,19 @@ static void __fwnode_link_del(struct fwnode_link *link) } /** + * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle. + * @link: the fwnode_link to be marked + * + * The fwnode_link_lock needs to be held when this function is called. + */ +static void __fwnode_link_cycle(struct fwnode_link *link) +{ + pr_debug("%pfwf: cycle: depends on %pfwf\n", + link->consumer, link->supplier); + link->flags |= FWLINK_FLAG_CYCLE; +} + +/** * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle. * @fwnode: fwnode whose supplier links need to be deleted * @@ -131,10 +140,10 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode) { struct fwnode_link *link, *tmp; - mutex_lock(&fwnode_link_lock); + guard(mutex)(&fwnode_link_lock); + list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) __fwnode_link_del(link); - mutex_unlock(&fwnode_link_lock); } /** @@ -147,10 +156,10 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode) { struct fwnode_link *link, *tmp; - mutex_lock(&fwnode_link_lock); + guard(mutex)(&fwnode_link_lock); + list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) __fwnode_link_del(link); - mutex_unlock(&fwnode_link_lock); } /** @@ -181,7 +190,51 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode) } EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers); -#ifdef CONFIG_SRCU +/** + * __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle + * @from: move consumers away from this fwnode + * @to: move consumers to this fwnode + * + * Move all consumer links from @from fwnode to @to fwnode. 
+ */ +static void __fwnode_links_move_consumers(struct fwnode_handle *from, + struct fwnode_handle *to) +{ + struct fwnode_link *link, *tmp; + + list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) { + __fwnode_link_add(link->consumer, to, link->flags); + __fwnode_link_del(link); + } +} + +/** + * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers + * @fwnode: fwnode from which to pick up dangling consumers + * @new_sup: fwnode of new supplier + * + * If the @fwnode has a corresponding struct device and the device supports + * probing (that is, added to a bus), then we want to let fw_devlink create + * MANAGED device links to this device, so leave @fwnode and its descendant's + * fwnode links alone. + * + * Otherwise, move its consumers to the new supplier @new_sup. + */ +static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode, + struct fwnode_handle *new_sup) +{ + struct fwnode_handle *child; + + if (fwnode->dev && fwnode->dev->bus) + return; + + fwnode->flags |= FWNODE_FLAG_NOT_DEVICE; + __fwnode_links_move_consumers(fwnode, new_sup); + + fwnode_for_each_available_child_node(fwnode, child) + __fw_devlink_pickup_dangling_consumers(child, new_sup); +} + static DEFINE_MUTEX(device_links_lock); DEFINE_STATIC_SRCU(device_links_srcu); @@ -220,47 +273,6 @@ static void device_link_remove_from_lists(struct device_link *link) list_del_rcu(&link->s_node); list_del_rcu(&link->c_node); } -#else /* !CONFIG_SRCU */ -static DECLARE_RWSEM(device_links_lock); - -static inline void device_links_write_lock(void) -{ - down_write(&device_links_lock); -} - -static inline void device_links_write_unlock(void) -{ - up_write(&device_links_lock); -} - -int device_links_read_lock(void) -{ - down_read(&device_links_lock); - return 0; -} - -void device_links_read_unlock(int not_used) -{ - up_read(&device_links_lock); -} - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -int device_links_read_lock_held(void) -{ - return lockdep_is_held(&device_links_lock); -} -#endif - -static inline void device_link_synchronize_removal(void) -{ -} - -static void device_link_remove_from_lists(struct device_link *link) -{ - list_del(&link->s_node); - list_del(&link->c_node); -} -#endif /* !CONFIG_SRCU */ static bool device_is_ancestor(struct device *dev, struct device *target) { @@ -272,6 +284,14 @@ static bool device_is_ancestor(struct device *dev, struct device *target) return false; } +#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \ + DL_FLAG_CYCLE | \ + DL_FLAG_MANAGED) +bool device_link_flag_is_sync_state_only(u32 flags) +{ + return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY; +} + /** * device_is_dependent - Check if one device depends on another one * @dev: Device to check dependencies for. @@ -280,7 +300,7 @@ static bool device_is_ancestor(struct device *dev, struct device *target) * Check if @target depends on @dev or any device dependent on it (its child or * its consumer etc). Return 1 if that is the case or 0 otherwise. 
*/ -int device_is_dependent(struct device *dev, void *target) +static int device_is_dependent(struct device *dev, void *target) { struct device_link *link; int ret; @@ -298,8 +318,7 @@ int device_is_dependent(struct device *dev, void *target) return ret; list_for_each_entry(link, &dev->links.consumers, s_node) { - if ((link->flags & ~DL_FLAG_INFERRED) == - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) + if (device_link_flag_is_sync_state_only(link->flags)) continue; if (link->consumer == target) @@ -372,8 +391,7 @@ static int device_reorder_to_tail(struct device *dev, void *not_used) device_for_each_child(dev, NULL, device_reorder_to_tail); list_for_each_entry(link, &dev->links.consumers, s_node) { - if ((link->flags & ~DL_FLAG_INFERRED) == - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) + if (device_link_flag_is_sync_state_only(link->flags)) continue; device_reorder_to_tail(link->consumer, NULL); } @@ -442,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev, struct device_link *link = to_devlink(dev); const char *output; - if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER)) output = "supplier unbind"; - else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) + else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) output = "consumer unbind"; else output = "never"; @@ -458,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev, { struct device_link *link = to_devlink(dev); - return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME)); + return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME)); } static DEVICE_ATTR_RO(runtime_pm); @@ -467,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev, { struct device_link *link = to_devlink(dev); - return sysfs_emit(buf, "%d\n", - !!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)); } static DEVICE_ATTR_RO(sync_state_only); @@ -514,36 +531,39 @@ static void devlink_dev_release(struct device *dev) /* * It may take a while to complete this work because of the SRCU * synchronization in device_link_release_fn() and if the consumer or - * supplier devices get deleted when it runs, so put it into the "long" - * workqueue. + * supplier devices get deleted when it runs, so put it into the + * dedicated workqueue. */ - queue_work(system_long_wq, &link->rm_work); + queue_work(device_link_wq, &link->rm_work); } -static struct class devlink_class = { +/** + * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate + */ +void device_link_wait_removal(void) +{ + /* + * devlink removal jobs are queued in the dedicated work queue. + * To be sure that all removal jobs are terminated, ensure that any + * scheduled work has run to completion. 
+ */ + flush_workqueue(device_link_wq); +} +EXPORT_SYMBOL_GPL(device_link_wait_removal); + +static const struct class devlink_class = { .name = "devlink", - .owner = THIS_MODULE, .dev_groups = devlink_groups, .dev_release = devlink_dev_release, }; -static int devlink_add_symlinks(struct device *dev, - struct class_interface *class_intf) +static int devlink_add_symlinks(struct device *dev) { + char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL; int ret; - size_t len; struct device_link *link = to_devlink(dev); struct device *sup = link->supplier; struct device *con = link->consumer; - char *buf; - - len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), - strlen(dev_bus_name(con)) + strlen(dev_name(con))); - len += strlen(":"); - len += strlen("supplier:") + 1; - buf = kzalloc(len, GFP_KERNEL); - if (!buf) - return -ENOMEM; ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier"); if (ret) @@ -553,59 +573,64 @@ static int devlink_add_symlinks(struct device *dev, if (ret) goto err_con; - snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); - ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf); + buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); + if (!buf_con) { + ret = -ENOMEM; + goto err_con_dev; + } + + ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con); if (ret) goto err_con_dev; - snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); - ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf); + buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + if (!buf_sup) { + ret = -ENOMEM; + goto err_sup_dev; + } + + ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup); if (ret) goto err_sup_dev; goto out; err_sup_dev: - snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); - sysfs_remove_link(&sup->kobj, buf); + sysfs_remove_link(&sup->kobj, buf_con); err_con_dev: sysfs_remove_link(&link->link_dev.kobj, "consumer"); err_con: sysfs_remove_link(&link->link_dev.kobj, "supplier"); out: - kfree(buf); return ret; } -static void devlink_remove_symlinks(struct device *dev, - struct class_interface *class_intf) +static void devlink_remove_symlinks(struct device *dev) { + char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL; struct device_link *link = to_devlink(dev); - size_t len; struct device *sup = link->supplier; struct device *con = link->consumer; - char *buf; sysfs_remove_link(&link->link_dev.kobj, "consumer"); sysfs_remove_link(&link->link_dev.kobj, "supplier"); - len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), - strlen(dev_bus_name(con)) + strlen(dev_name(con))); - len += strlen(":"); - len += strlen("supplier:") + 1; - buf = kzalloc(len, GFP_KERNEL); - if (!buf) { - WARN(1, "Unable to properly free device link symlinks!\n"); - return; - } - if (device_is_registered(con)) { - snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); - sysfs_remove_link(&con->kobj, buf); + buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + if (!buf_sup) + goto out; + sysfs_remove_link(&con->kobj, buf_sup); } - snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); - sysfs_remove_link(&sup->kobj, buf); - kfree(buf); + + buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); + if (!buf_con) + goto out; + sysfs_remove_link(&sup->kobj, buf_con); + + return; + +out: + WARN(1, "Unable to properly 
free device link symlinks!\n"); } static struct class_interface devlink_class_intf = { @@ -634,7 +659,8 @@ postcore_initcall(devlink_class_init); DL_FLAG_AUTOREMOVE_SUPPLIER | \ DL_FLAG_AUTOPROBE_CONSUMER | \ DL_FLAG_SYNC_STATE_ONLY | \ - DL_FLAG_INFERRED) + DL_FLAG_INFERRED | \ + DL_FLAG_CYCLE) #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE) @@ -645,6 +671,9 @@ postcore_initcall(devlink_class_init); * @supplier: Supplier end of the link. * @flags: Link flags. * + * Return: On success, a device_link struct will be returned. + * On error or invalid flag settings, NULL will be returned. + * * The caller is responsible for the proper synchronization of the link creation * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the * runtime PM framework to take the link into account. Second, if the @@ -703,8 +732,6 @@ struct device_link *device_link_add(struct device *consumer, if (!consumer || !supplier || consumer == supplier || flags & ~DL_ADD_VALID_FLAGS || (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) || - (flags & DL_FLAG_SYNC_STATE_ONLY && - (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) || (flags & DL_FLAG_AUTOPROBE_CONSUMER && flags & (DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER))) @@ -720,6 +747,10 @@ struct device_link *device_link_add(struct device *consumer, if (!(flags & DL_FLAG_STATELESS)) flags |= DL_FLAG_MANAGED; + if (flags & DL_FLAG_SYNC_STATE_ONLY && + !device_link_flag_is_sync_state_only(flags)) + return NULL; + device_links_write_lock(); device_pm_lock(); @@ -760,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer, if (link->consumer != consumer) continue; - if (link->flags & DL_FLAG_INFERRED && + if (device_link_test(link, DL_FLAG_INFERRED) && !(flags & DL_FLAG_INFERRED)) link->flags &= ~DL_FLAG_INFERRED; if (flags & DL_FLAG_PM_RUNTIME) { - if (!(link->flags & DL_FLAG_PM_RUNTIME)) { + if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) { pm_runtime_new_link(consumer); link->flags |= DL_FLAG_PM_RUNTIME; } @@ -775,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer, if (flags & DL_FLAG_STATELESS) { kref_get(&link->kref); - if (link->flags & DL_FLAG_SYNC_STATE_ONLY && - !(link->flags & DL_FLAG_STATELESS)) { + if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) && + !device_link_test(link, DL_FLAG_STATELESS)) { link->flags |= DL_FLAG_STATELESS; goto reorder; } else { @@ -791,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer, * update the existing link to stay around longer. 
*/ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { - if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) { link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; } @@ -799,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer, link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER); } - if (!(link->flags & DL_FLAG_MANAGED)) { + if (!device_link_test(link, DL_FLAG_MANAGED)) { kref_get(&link->kref); link->flags |= DL_FLAG_MANAGED; device_link_init_status(link, consumer, supplier); } - if (link->flags & DL_FLAG_SYNC_STATE_ONLY && + if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) && !(flags & DL_FLAG_SYNC_STATE_ONLY)) { link->flags &= ~DL_FLAG_SYNC_STATE_ONLY; goto reorder; @@ -908,7 +939,7 @@ static void __device_link_del(struct kref *kref) static void device_link_put_kref(struct device_link *link) { - if (link->flags & DL_FLAG_STATELESS) + if (device_link_test(link, DL_FLAG_STATELESS)) kref_put(&link->kref, __device_link_del); else if (!device_is_registered(link->consumer)) __device_link_del(&link->kref); @@ -972,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev) if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } else { - WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)); WRITE_ONCE(link->status, DL_STATE_DORMANT); } } @@ -984,6 +1015,22 @@ static bool dev_is_best_effort(struct device *dev) (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT)); } +static struct fwnode_handle *fwnode_links_check_suppliers( + struct fwnode_handle *fwnode) +{ + struct fwnode_link *link; + + if (!fwnode || fw_devlink_is_permissive()) + return NULL; + + list_for_each_entry(link, &fwnode->suppliers, c_hook) + if (!(link->flags & + (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE))) + return link->supplier; + + return NULL; +} + /** * device_links_check_suppliers - Check presence of supplier drivers. * @dev: Consumer device. @@ -1010,45 +1057,36 @@ int device_links_check_suppliers(struct device *dev) * Device waiting for supplier to become available is not allowed to * probe. 
*/ - mutex_lock(&fwnode_link_lock); - if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) && - !fw_devlink_is_permissive()) { - sup_fw = list_first_entry(&dev->fwnode->suppliers, - struct fwnode_link, - c_hook)->supplier; - if (!dev_is_best_effort(dev)) { - fwnode_ret = -EPROBE_DEFER; - dev_err_probe(dev, -EPROBE_DEFER, - "wait for supplier %pfwP\n", sup_fw); - } else { - fwnode_ret = -EAGAIN; + scoped_guard(mutex, &fwnode_link_lock) { + sup_fw = fwnode_links_check_suppliers(dev->fwnode); + if (sup_fw) { + if (dev_is_best_effort(dev)) + fwnode_ret = -EAGAIN; + else + return dev_err_probe(dev, -EPROBE_DEFER, + "wait for supplier %pfwf\n", sup_fw); } } - mutex_unlock(&fwnode_link_lock); - if (fwnode_ret == -EPROBE_DEFER) - return fwnode_ret; device_links_write_lock(); list_for_each_entry(link, &dev->links.suppliers, c_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_AVAILABLE && - !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { + !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) { if (dev_is_best_effort(dev) && - link->flags & DL_FLAG_INFERRED && + device_link_test(link, DL_FLAG_INFERRED) && !link->supplier->can_match) { ret = -EAGAIN; continue; } device_links_missing_supplier(dev); - dev_err_probe(dev, -EPROBE_DEFER, - "supplier %s not ready\n", - dev_name(link->supplier)); - ret = -EPROBE_DEFER; + ret = dev_err_probe(dev, -EPROBE_DEFER, + "supplier %s not ready\n", dev_name(link->supplier)); break; } WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); @@ -1089,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev, return; list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_ACTIVE) return; @@ -1130,10 +1168,7 @@ static void device_links_flush_sync_list(struct list_head *list, if (dev != dont_lock_dev) device_lock(dev); - if (dev->bus->sync_state) - dev->bus->sync_state(dev); - else if (dev->driver && dev->driver->sync_state) - dev->driver->sync_state(dev); + dev_sync_state(dev); if (dev != dont_lock_dev) device_unlock(dev); @@ -1204,7 +1239,8 @@ static ssize_t waiting_for_supplier_show(struct device *dev, bool val; device_lock(dev); - val = !list_empty(&dev->fwnode->suppliers); + scoped_guard(mutex, &fwnode_link_lock) + val = !!fwnode_links_check_suppliers(dev->fwnode); device_unlock(dev); return sysfs_emit(buf, "%u\n", val); } @@ -1231,7 +1267,7 @@ void device_links_force_bind(struct device *dev) device_links_write_lock(); list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_AVAILABLE) { @@ -1267,23 +1303,32 @@ void device_links_driver_bound(struct device *dev) * them. So, fw_devlink no longer needs to create device links to any * of the device's suppliers. * - * Also, if a child firmware node of this bound device is not added as - * a device by now, assume it is never going to be added and make sure - * other devices don't defer probe indefinitely by waiting for such a - * child device. + * Also, if a child firmware node of this bound device is not added as a + * device by now, assume it is never going to be added. 
Make this bound + * device the fallback supplier to the dangling consumers of the child + * firmware node because this bound device is probably implementing the + * child firmware node functionality and we don't want the dangling + * consumers to defer probe indefinitely waiting for a device for the + * child firmware node. */ if (dev->fwnode && dev->fwnode->dev == dev) { struct fwnode_handle *child; + fwnode_links_purge_suppliers(dev->fwnode); + + guard(mutex)(&fwnode_link_lock); + fwnode_for_each_available_child_node(dev->fwnode, child) - fw_devlink_purge_absent_suppliers(child); + __fw_devlink_pickup_dangling_consumers(child, + dev->fwnode); + __fw_devlink_link_to_consumers(dev); } device_remove_file(dev, &dev_attr_waiting_for_supplier); device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; /* @@ -1299,7 +1344,7 @@ void device_links_driver_bound(struct device *dev) WARN_ON(link->status != DL_STATE_DORMANT); WRITE_ONCE(link->status, DL_STATE_AVAILABLE); - if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) + if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER)) driver_deferred_probe_add(link->consumer); } @@ -1311,11 +1356,11 @@ void device_links_driver_bound(struct device *dev) list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { struct device *supplier; - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; supplier = link->supplier; - if (link->flags & DL_FLAG_SYNC_STATE_ONLY) { + if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) { /* * When DL_FLAG_SYNC_STATE_ONLY is set, it means no * other DL_MANAGED_LINK_FLAGS have been set. So, it's @@ -1323,7 +1368,7 @@ void device_links_driver_bound(struct device *dev) */ device_link_drop_managed(link); } else if (dev_is_best_effort(dev) && - link->flags & DL_FLAG_INFERRED && + device_link_test(link, DL_FLAG_INFERRED) && link->status != DL_STATE_CONSUMER_PROBE && !link->supplier->can_match) { /* @@ -1375,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev) struct device_link *link, *ln; list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; - if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) { device_link_drop_managed(link); continue; } @@ -1390,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev) if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } else { - WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)); WRITE_ONCE(link->status, DL_STATE_DORMANT); } } @@ -1415,7 +1460,7 @@ void device_links_no_driver(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; /* @@ -1452,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev) device_links_write_lock(); list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; - WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); + WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)); WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); 
/* @@ -1464,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev) * has moved to DL_STATE_SUPPLIER_UNBIND. */ if (link->status == DL_STATE_SUPPLIER_UNBIND && - link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER)) device_link_drop_managed(link); WRITE_ONCE(link->status, DL_STATE_DORMANT); @@ -1498,7 +1543,7 @@ bool device_links_busy(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status == DL_STATE_CONSUMER_PROBE @@ -1540,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev) list_for_each_entry(link, &dev->links.consumers, s_node) { enum device_link_state status; - if (!(link->flags & DL_FLAG_MANAGED) || - link->flags & DL_FLAG_SYNC_STATE_ONLY) + if (!device_link_test(link, DL_FLAG_MANAGED) || + device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) continue; status = link->status; @@ -1607,7 +1652,7 @@ static void device_links_purge(struct device *dev) #define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \ DL_FLAG_PM_RUNTIME) -static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON; +static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM; static int __init fw_devlink_setup(char *arg) { if (!arg) @@ -1633,8 +1678,36 @@ static int __init fw_devlink_strict_setup(char *arg) } early_param("fw_devlink.strict", fw_devlink_strict_setup); -u32 fw_devlink_get_flags(void) +#define FW_DEVLINK_SYNC_STATE_STRICT 0 +#define FW_DEVLINK_SYNC_STATE_TIMEOUT 1 + +#ifndef CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT +static int fw_devlink_sync_state; +#else +static int fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT; +#endif + +static int __init fw_devlink_sync_state_setup(char *arg) { + if (!arg) + return -EINVAL; + + if (strcmp(arg, "strict") == 0) { + fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_STRICT; + return 0; + } else if (strcmp(arg, "timeout") == 0) { + fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT; + return 0; + } + return -EINVAL; +} +early_param("fw_devlink.sync_state", fw_devlink_sync_state_setup); + +static inline u32 fw_devlink_get_flags(u8 fwlink_flags) +{ + if (fwlink_flags & FWLINK_FLAG_CYCLE) + return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE; + return fw_devlink_flags; } @@ -1669,10 +1742,10 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode) static void fw_devlink_relax_link(struct device_link *link) { - if (!(link->flags & DL_FLAG_INFERRED)) + if (!device_link_test(link, DL_FLAG_INFERRED)) return; - if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE)) + if (device_link_flag_is_sync_state_only(link->flags)) return; pm_runtime_drop_link(link); @@ -1700,6 +1773,44 @@ void fw_devlink_drivers_done(void) device_links_write_unlock(); } +static int fw_devlink_dev_sync_state(struct device *dev, void *data) +{ + struct device_link *link = to_devlink(dev); + struct device *sup = link->supplier; + + if (!device_link_test(link, DL_FLAG_MANAGED) || + link->status == DL_STATE_ACTIVE || sup->state_synced || + !dev_has_sync_state(sup)) + return 0; + + if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) { + dev_info(sup, "sync_state() pending due to %s\n", + dev_name(link->consumer)); + return 0; + } + + if (!list_empty(&sup->links.defer_sync)) + return 0; + + dev_warn(sup, "Timed out. 
Forcing sync_state()\n"); + sup->state_synced = true; + get_device(sup); + list_add_tail(&sup->links.defer_sync, data); + + return 0; +} + +void fw_devlink_probing_done(void) +{ + LIST_HEAD(sync_list); + + device_links_write_lock(); + class_for_each_device(&devlink_class, NULL, &sync_list, + fw_devlink_dev_sync_state); + device_links_write_unlock(); + device_links_flush_sync_list(&sync_list, NULL); +} + /** * wait_for_init_devices_probe - Try to probe any device needed for init * @@ -1769,44 +1880,204 @@ static void fw_devlink_unblock_consumers(struct device *dev) device_links_write_unlock(); } +static bool fwnode_init_without_drv(struct fwnode_handle *fwnode) +{ + struct device *dev; + bool ret; + + if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED)) + return false; + + dev = get_dev_from_fwnode(fwnode); + ret = !dev || dev->links.status == DL_DEV_NO_DRIVER; + put_device(dev); + + return ret; +} + +static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent; + + fwnode_for_each_parent_node(fwnode, parent) { + if (fwnode_init_without_drv(parent)) { + fwnode_handle_put(parent); + return true; + } + } + + return false; +} + /** - * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links - * @con: Device to check dependencies for. - * @sup: Device to check against. - * - * Check if @sup depends on @con or any device dependent on it (its child or - * its consumer etc). When such a cyclic dependency is found, convert all - * device links created solely by fw_devlink into SYNC_STATE_ONLY device links. - * This is the equivalent of doing fw_devlink=permissive just between the - * devices in the cycle. We need to do this because, at this point, fw_devlink - * can't tell which of these dependencies is not a real dependency. - * - * Return 1 if a cycle is found. Otherwise, return 0. + * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child + * @ancestor: Firmware which is tested for being an ancestor + * @child: Firmware which is tested for being the child + * + * A node is considered an ancestor of itself too. + * + * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false. */ -static int fw_devlink_relax_cycle(struct device *con, void *sup) +static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, + const struct fwnode_handle *child) { - struct device_link *link; - int ret; + struct fwnode_handle *parent; - if (con == sup) - return 1; + if (IS_ERR_OR_NULL(ancestor)) + return false; - ret = device_for_each_child(con, sup, fw_devlink_relax_cycle); - if (ret) - return ret; + if (child == ancestor) + return true; - list_for_each_entry(link, &con->links.consumers, s_node) { - if ((link->flags & ~DL_FLAG_INFERRED) == - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) - continue; + fwnode_for_each_parent_node(child, parent) { + if (parent == ancestor) { + fwnode_handle_put(parent); + return true; + } + } + return false; +} - if (!fw_devlink_relax_cycle(link->consumer, sup)) +/** + * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode + * @fwnode: firmware node + * + * Given a firmware node (@fwnode), this function finds its closest ancestor + * firmware node that has a corresponding struct device and returns that struct + * device. + * + * The caller is responsible for calling put_device() on the returned device + * pointer. + * + * Return: a pointer to the device of the @fwnode's closest ancestor. 
+ */ +static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent; + struct device *dev; + + fwnode_for_each_parent_node(fwnode, parent) { + dev = get_dev_from_fwnode(parent); + if (dev) { + fwnode_handle_put(parent); + return dev; + } + } + return NULL; +} + +/** + * __fw_devlink_relax_cycles - Relax and mark dependency cycles. + * @con_handle: Potential consumer device fwnode. + * @sup_handle: Potential supplier's fwnode. + * + * Needs to be called with fwnode_lock and device link lock held. + * + * Check if @sup_handle or any of its ancestors or suppliers direct/indirectly + * depend on @con. This function can detect multiple cyles between @sup_handle + * and @con. When such dependency cycles are found, convert all device links + * created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark + * all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are + * converted into a device link in the future, they are created as + * SYNC_STATE_ONLY device links. This is the equivalent of doing + * fw_devlink=permissive just between the devices in the cycle. We need to do + * this because, at this point, fw_devlink can't tell which of these + * dependencies is not a real dependency. + * + * Return true if one or more cycles were found. Otherwise, return false. + */ +static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle, + struct fwnode_handle *sup_handle) +{ + struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL; + struct fwnode_link *link; + struct device_link *dev_link; + bool ret = false; + + if (!sup_handle) + return false; + + /* + * We aren't trying to find all cycles. Just a cycle between con and + * sup_handle. + */ + if (sup_handle->flags & FWNODE_FLAG_VISITED) + return false; + + sup_handle->flags |= FWNODE_FLAG_VISITED; + + /* Termination condition. */ + if (sup_handle == con_handle) { + pr_debug("----- cycle: start -----\n"); + ret = true; + goto out; + } + + sup_dev = get_dev_from_fwnode(sup_handle); + con_dev = get_dev_from_fwnode(con_handle); + /* + * If sup_dev is bound to a driver and @con hasn't started binding to a + * driver, sup_dev can't be a consumer of @con. So, no need to check + * further. + */ + if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND && + con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) { + ret = false; + goto out; + } + + list_for_each_entry(link, &sup_handle->suppliers, c_hook) { + if (link->flags & FWLINK_FLAG_IGNORE) continue; - ret = 1; + if (__fw_devlink_relax_cycles(con_handle, link->supplier)) { + __fwnode_link_cycle(link); + ret = true; + } + } - fw_devlink_relax_link(link); + /* + * Give priority to device parent over fwnode parent to account for any + * quirks in how fwnodes are converted to devices. + */ + if (sup_dev) + par_dev = get_device(sup_dev->parent); + else + par_dev = fwnode_get_next_parent_dev(sup_handle); + + if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) { + pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle, + par_dev->fwnode); + ret = true; } + + if (!sup_dev) + goto out; + + list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) { + /* + * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as + * such due to a cycle. 
+ */ + if (device_link_flag_is_sync_state_only(dev_link->flags) && + !device_link_test(dev_link, DL_FLAG_CYCLE)) + continue; + + if (__fw_devlink_relax_cycles(con_handle, + dev_link->supplier->fwnode)) { + pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle, + dev_link->supplier->fwnode); + fw_devlink_relax_link(dev_link); + dev_link->flags |= DL_FLAG_CYCLE; + ret = true; + } + } + +out: + sup_handle->flags &= ~FWNODE_FLAG_VISITED; + put_device(sup_dev); + put_device(con_dev); + put_device(par_dev); return ret; } @@ -1814,7 +2085,7 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup) * fw_devlink_create_devlink - Create a device link from a consumer to fwnode * @con: consumer device for the device link * @sup_handle: fwnode handle of supplier - * @flags: devlink flags + * @link: fwnode link that's being converted to a device link * * This function will try to create a device link between the consumer device * @con and the supplier device represented by @sup_handle. @@ -1831,10 +2102,15 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup) * possible to do that in the future */ static int fw_devlink_create_devlink(struct device *con, - struct fwnode_handle *sup_handle, u32 flags) + struct fwnode_handle *sup_handle, + struct fwnode_link *link) { struct device *sup_dev; int ret = 0; + u32 flags; + + if (link->flags & FWLINK_FLAG_IGNORE) + return 0; /* * In some cases, a device P might also be a supplier to its child node @@ -1855,7 +2131,30 @@ static int fw_devlink_create_devlink(struct device *con, fwnode_is_ancestor_of(sup_handle, con->fwnode)) return -EINVAL; - sup_dev = get_dev_from_fwnode(sup_handle); + /* + * Don't try to optimize by not calling the cycle detection logic under + * certain conditions. There's always some corner case that won't get + * detected. + */ + device_links_write_lock(); + if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) { + __fwnode_link_cycle(link); + pr_debug("----- cycle: end -----\n"); + pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n", + link->consumer, sup_handle); + } + device_links_write_unlock(); + + if (con->fwnode == link->consumer) + flags = fw_devlink_get_flags(link->flags); + else + flags = FW_DEVLINK_FLAGS_PERMISSIVE; + + if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE) + sup_dev = fwnode_get_next_parent_dev(sup_handle); + else + sup_dev = get_dev_from_fwnode(sup_handle); + if (sup_dev) { /* * If it's one of those drivers that don't actually bind to @@ -1864,71 +2163,34 @@ static int fw_devlink_create_devlink(struct device *con, */ if (sup_dev->links.status == DL_DEV_NO_DRIVER && sup_handle->flags & FWNODE_FLAG_INITIALIZED) { + dev_dbg(con, + "Not linking %pfwf - dev might never probe\n", + sup_handle); ret = -EINVAL; goto out; } - /* - * If this fails, it is due to cycles in device links. Just - * give up on this link and treat it as invalid. - */ - if (!device_link_add(con, sup_dev, flags) && - !(flags & DL_FLAG_SYNC_STATE_ONLY)) { - dev_info(con, "Fixing up cyclic dependency with %s\n", - dev_name(sup_dev)); - device_links_write_lock(); - fw_devlink_relax_cycle(con, sup_dev); - device_links_write_unlock(); - device_link_add(con, sup_dev, - FW_DEVLINK_FLAGS_PERMISSIVE); + if (con != sup_dev && !device_link_add(con, sup_dev, flags)) { + dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n", + flags, dev_name(sup_dev), link->consumer); ret = -EINVAL; } goto out; } - /* Supplier that's already initialized without a struct device. 
*/ - if (sup_handle->flags & FWNODE_FLAG_INITIALIZED) - return -EINVAL; - - /* - * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports - * cycles. So cycle detection isn't necessary and shouldn't be - * done. - */ - if (flags & DL_FLAG_SYNC_STATE_ONLY) - return -EAGAIN; - /* - * If we can't find the supplier device from its fwnode, it might be - * due to a cyclic dependency between fwnodes. Some of these cycles can - * be broken by applying logic. Check for these types of cycles and - * break them so that devices in the cycle probe properly. - * - * If the supplier's parent is dependent on the consumer, then the - * consumer and supplier have a cyclic dependency. Since fw_devlink - * can't tell which of the inferred dependencies are incorrect, don't - * enforce probe ordering between any of the devices in this cyclic - * dependency. Do this by relaxing all the fw_devlink device links in - * this cycle and by treating the fwnode link between the consumer and - * the supplier as an invalid dependency. + * Supplier or supplier's ancestor already initialized without a struct + * device or being probed by a driver. */ - sup_dev = fwnode_get_next_parent_dev(sup_handle); - if (sup_dev && device_is_dependent(con, sup_dev)) { - dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n", - sup_handle, dev_name(sup_dev)); - device_links_write_lock(); - fw_devlink_relax_cycle(con, sup_dev); - device_links_write_unlock(); - ret = -EINVAL; - } else { - /* - * Can't check for cycles or no cycles. So let's try - * again later. - */ - ret = -EAGAIN; + if (fwnode_init_without_drv(sup_handle) || + fwnode_ancestor_init_without_drv(sup_handle)) { + dev_dbg(con, "Not linking %pfwf - might never become dev\n", + sup_handle); + return -EINVAL; } + ret = -EAGAIN; out: put_device(sup_dev); return ret; @@ -1956,7 +2218,6 @@ static void __fw_devlink_link_to_consumers(struct device *dev) struct fwnode_link *link, *tmp; list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { - u32 dl_flags = fw_devlink_get_flags(); struct device *con_dev; bool own_link = true; int ret; @@ -1986,14 +2247,13 @@ static void __fw_devlink_link_to_consumers(struct device *dev) con_dev = NULL; } else { own_link = false; - dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; } } if (!con_dev) continue; - ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags); + ret = fw_devlink_create_devlink(con_dev, fwnode, link); put_device(con_dev); if (!own_link || ret == -EAGAIN) continue; @@ -2013,10 +2273,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev) * * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev * and the real suppliers of @dev. Once these device links are created, the - * fwnode links are deleted. When such device links are successfully created, - * this function is called recursively on those supplier devices. This is - * needed to detect and break some invalid cycles in fwnode links. See - * fw_devlink_create_devlink() for more details. + * fwnode links are deleted. 
* * In addition, it also looks at all the suppliers of the entire fwnode tree * because some of the child devices of @dev that have not been added yet @@ -2034,44 +2291,16 @@ static void __fw_devlink_link_to_suppliers(struct device *dev, bool own_link = (dev->fwnode == fwnode); struct fwnode_link *link, *tmp; struct fwnode_handle *child = NULL; - u32 dl_flags; - - if (own_link) - dl_flags = fw_devlink_get_flags(); - else - dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { int ret; - struct device *sup_dev; struct fwnode_handle *sup = link->supplier; - ret = fw_devlink_create_devlink(dev, sup, dl_flags); + ret = fw_devlink_create_devlink(dev, sup, link); if (!own_link || ret == -EAGAIN) continue; __fwnode_link_del(link); - - /* If no device link was created, nothing more to do. */ - if (ret) - continue; - - /* - * If a device link was successfully created to a supplier, we - * now need to try and link the supplier to all its suppliers. - * - * This is needed to detect and delete false dependencies in - * fwnode links that haven't been converted to a device link - * yet. See comments in fw_devlink_create_devlink() for more - * details on the false dependency. - * - * Without deleting these false dependencies, some devices will - * never probe because they'll keep waiting for their false - * dependency fwnode links to be converted to device links. - */ - sup_dev = get_dev_from_fwnode(sup); - __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode); - put_device(sup_dev); } /* @@ -2093,19 +2322,21 @@ static void fw_devlink_link_device(struct device *dev) fw_devlink_parse_fwtree(fwnode); - mutex_lock(&fwnode_link_lock); + guard(mutex)(&fwnode_link_lock); + __fw_devlink_link_to_consumers(dev); __fw_devlink_link_to_suppliers(dev, fwnode); - mutex_unlock(&fwnode_link_lock); } /* Device links support end. */ -int (*platform_notify)(struct device *dev) = NULL; -int (*platform_notify_remove)(struct device *dev) = NULL; static struct kobject *dev_kobj; -struct kobject *sysfs_dev_char_kobj; -struct kobject *sysfs_dev_block_kobj; + +/* /sys/dev/char */ +static struct kobject *sysfs_dev_char_kobj; + +/* /sys/dev/block */ +static struct kobject *sysfs_dev_block_kobj; static DEFINE_MUTEX(device_hotplug_lock); @@ -2146,19 +2377,13 @@ static void device_platform_notify(struct device *dev) acpi_device_notify(dev); software_node_notify(dev); - - if (platform_notify) - platform_notify(dev); } static void device_platform_notify_remove(struct device *dev) { - acpi_device_notify_remove(dev); - software_node_notify_remove(dev); - if (platform_notify_remove) - platform_notify_remove(dev); + acpi_device_notify_remove(dev); } /** @@ -2297,6 +2522,15 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, } EXPORT_SYMBOL_GPL(device_show_bool); +ssize_t device_show_string(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + + return sysfs_emit(buf, "%s\n", (char *)ea->var); +} +EXPORT_SYMBOL_GPL(device_show_string); + /** * device_release - free device structure. * @kobj: device's kobject. 
@@ -2340,7 +2574,7 @@ static const void *device_namespace(const struct kobject *kobj) const struct device *dev = kobj_to_dev(kobj); const void *ns = NULL; - if (dev->class && dev->class->ns_type) + if (dev->class && dev->class->namespace) ns = dev->class->namespace(dev); return ns; @@ -2354,7 +2588,7 @@ static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t dev->class->get_ownership(dev, uid, gid); } -static struct kobj_type device_ktype = { +static const struct kobj_type device_ktype = { .release = device_release, .sysfs_ops = &dev_sysfs_ops, .namespace = device_namespace, @@ -2387,9 +2621,38 @@ static const char *dev_uevent_name(const struct kobject *kobj) return NULL; } -static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env) +/* + * Try filling "DRIVER=<name>" uevent variable for a device. Because this + * function may race with binding and unbinding the device from a driver, + * we need to be careful. Binding is generally safe, at worst we miss the + * fact that the device is already bound to a driver (but the driver + * information that is delivered through uevents is best-effort, it may + * become obsolete as soon as it is generated anyways). Unbinding is more + * risky as driver pointer is transitioning to NULL, so READ_ONCE() should + * be used to make sure we are dealing with the same pointer, and to + * ensure that driver structure is not going to disappear from under us + * we take bus' drivers klist lock. The assumption that only registered + * driver can be bound to a device, and to unregister a driver bus code + * will take the same lock. + */ +static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct device *dev = kobj_to_dev(kobj); + struct subsys_private *sp = bus_to_subsys(dev->bus); + + if (sp) { + scoped_guard(spinlock, &sp->klist_drivers.k_lock) { + struct device_driver *drv = READ_ONCE(dev->driver); + if (drv) + add_uevent_var(env, "DRIVER=%s", drv->name); + } + + subsys_put(sp); + } +} + +static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) +{ + const struct device *dev = kobj_to_dev(kobj); int retval = 0; /* add device node properties if present */ @@ -2418,8 +2681,8 @@ static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env) if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); - if (dev->driver) - add_uevent_var(env, "DRIVER=%s", dev->driver->name); + /* Add "DRIVER=%s" variable if the device is bound to a driver */ + dev_driver_uevent(dev, env); /* Add common DT information about the device */ of_device_uevent(dev, env); @@ -2595,15 +2858,6 @@ static void devm_attr_group_remove(struct device *dev, void *res) sysfs_remove_group(&dev->kobj, group); } -static void devm_attr_groups_remove(struct device *dev, void *res) -{ - union device_attr_group_devres *devres = res; - const struct attribute_group **groups = devres->groups; - - dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); - sysfs_remove_groups(&dev->kobj, groups); -} - /** * devm_device_add_group - given a device, create a managed attribute group * @dev: The device to create the group for @@ -2636,45 +2890,9 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp) } EXPORT_SYMBOL_GPL(devm_device_add_group); -/** - * devm_device_add_groups - create a bunch of managed attribute groups - * @dev: The device to create the group for - * @groups: The attribute groups to create, NULL terminated - * - * This function creates a 
bunch of managed attribute groups. If an error - * occurs when creating a group, all previously created groups will be - * removed, unwinding everything back to the original state when this - * function was called. It will explicitly warn and error if any of the - * attribute files being created already exist. - * - * Returns 0 on success or error code from sysfs_create_group on failure. - */ -int devm_device_add_groups(struct device *dev, - const struct attribute_group **groups) -{ - union device_attr_group_devres *devres; - int error; - - devres = devres_alloc(devm_attr_groups_remove, - sizeof(*devres), GFP_KERNEL); - if (!devres) - return -ENOMEM; - - error = sysfs_create_groups(&dev->kobj, groups); - if (error) { - devres_free(devres); - return error; - } - - devres->groups = groups; - devres_add(dev, devres); - return 0; -} -EXPORT_SYMBOL_GPL(devm_device_add_groups); - static int device_add_attrs(struct device *dev) { - struct class *class = dev->class; + const struct class *class = dev->class; const struct device_type *type = dev->type; int error; @@ -2741,7 +2959,7 @@ static int device_add_attrs(struct device *dev) static void device_remove_attrs(struct device *dev) { - struct class *class = dev->class; + const struct class *class = dev->class; const struct device_type *type = dev->type; if (dev->physical_location) { @@ -2955,13 +3173,11 @@ void device_initialize(struct device *dev) defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) dev->dma_coherent = dma_default_coherent; #endif -#ifdef CONFIG_SWIOTLB - dev->dma_io_tlb_mem = &io_tlb_default_mem; -#endif + swiotlb_dev_init(dev); } EXPORT_SYMBOL_GPL(device_initialize); -struct kobject *virtual_device_parent(struct device *dev) +struct kobject *virtual_device_parent(void) { static struct kobject *virtual_dir = NULL; @@ -2974,7 +3190,7 @@ struct kobject *virtual_device_parent(struct device *dev) struct class_dir { struct kobject kobj; - struct class *class; + const struct class *class; }; #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) @@ -2992,14 +3208,14 @@ struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *ko return dir->class->ns_type; } -static struct kobj_type class_dir_ktype = { +static const struct kobj_type class_dir_ktype = { .release = class_dir_release, .sysfs_ops = &kobj_sysfs_ops, .child_ns_type = class_dir_child_ns_type }; -static struct kobject * -class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) +static struct kobject *class_dir_create_and_add(struct subsys_private *sp, + struct kobject *parent_kobj) { struct class_dir *dir; int retval; @@ -3008,12 +3224,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) if (!dir) return ERR_PTR(-ENOMEM); - dir->class = class; + dir->class = sp->class; kobject_init(&dir->kobj, &class_dir_ktype); - dir->kobj.kset = &class->p->glue_dirs; + dir->kobj.kset = &sp->glue_dirs; - retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); + retval = kobject_add(&dir->kobj, parent_kobj, "%s", sp->class->name); if (retval < 0) { kobject_put(&dir->kobj); return ERR_PTR(retval); @@ -3026,57 +3242,61 @@ static DEFINE_MUTEX(gdp_mutex); static struct kobject *get_device_parent(struct device *dev, struct device *parent) { - if (dev->class) { - struct kobject *kobj = NULL; + struct subsys_private *sp = class_to_subsys(dev->class); + struct kobject *kobj = NULL; + + if (sp) { struct kobject *parent_kobj; struct kobject *k; -#ifdef CONFIG_BLOCK - /* block disks show up in /sys/block */ - if 
(sysfs_deprecated && dev->class == &block_class) { - if (parent && parent->class == &block_class) - return &parent->kobj; - return &block_class.p->subsys.kobj; - } -#endif - /* * If we have no parent, we live in "virtual". * Class-devices with a non class-device as parent, live * in a "glue" directory to prevent namespace collisions. */ if (parent == NULL) - parent_kobj = virtual_device_parent(dev); - else if (parent->class && !dev->class->ns_type) + parent_kobj = virtual_device_parent(); + else if (parent->class && !dev->class->ns_type) { + subsys_put(sp); return &parent->kobj; - else + } else { parent_kobj = &parent->kobj; + } mutex_lock(&gdp_mutex); /* find our class-directory at the parent and reference it */ - spin_lock(&dev->class->p->glue_dirs.list_lock); - list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) + spin_lock(&sp->glue_dirs.list_lock); + list_for_each_entry(k, &sp->glue_dirs.list, entry) if (k->parent == parent_kobj) { kobj = kobject_get(k); break; } - spin_unlock(&dev->class->p->glue_dirs.list_lock); + spin_unlock(&sp->glue_dirs.list_lock); if (kobj) { mutex_unlock(&gdp_mutex); + subsys_put(sp); return kobj; } /* or create a new class-directory at the parent device */ - k = class_dir_create_and_add(dev->class, parent_kobj); + k = class_dir_create_and_add(sp, parent_kobj); /* do not emit an uevent for this simple "glue" directory */ mutex_unlock(&gdp_mutex); + subsys_put(sp); return k; } /* subsystems can specify a default root directory for their devices */ - if (!parent && dev->bus && dev->bus->dev_root) - return &dev->bus->dev_root->kobj; + if (!parent && dev->bus) { + struct device *dev_root = bus_get_dev_root(dev->bus); + + if (dev_root) { + kobj = &dev_root->kobj; + put_device(dev_root); + return kobj; + } + } if (parent) return &parent->kobj; @@ -3086,10 +3306,23 @@ static struct kobject *get_device_parent(struct device *dev, static inline bool live_in_glue_dir(struct kobject *kobj, struct device *dev) { - if (!kobj || !dev->class || - kobj->kset != &dev->class->p->glue_dirs) + struct subsys_private *sp; + bool retval; + + if (!kobj || !dev->class) return false; - return true; + + sp = class_to_subsys(dev->class); + if (!sp) + return false; + + if (kobj->kset == &sp->glue_dirs) + retval = true; + else + retval = false; + + subsys_put(sp); + return retval; } static inline struct kobject *get_glue_dir(struct device *dev) @@ -3186,6 +3419,7 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) static int device_add_class_symlinks(struct device *dev) { struct device_node *of_node = dev_of_node(dev); + struct subsys_private *sp; int error; if (of_node) { @@ -3195,12 +3429,11 @@ static int device_add_class_symlinks(struct device *dev) /* An error here doesn't warrant bringing down the device */ } - if (!dev->class) + sp = class_to_subsys(dev->class); + if (!sp) return 0; - error = sysfs_create_link(&dev->kobj, - &dev->class->p->subsys.kobj, - "subsystem"); + error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem"); if (error) goto out_devnode; @@ -3211,46 +3444,38 @@ static int device_add_class_symlinks(struct device *dev) goto out_subsys; } -#ifdef CONFIG_BLOCK - /* /sys/block has directories and does not need symlinks */ - if (sysfs_deprecated && dev->class == &block_class) - return 0; -#endif - /* link in the class directory pointing to the device */ - error = sysfs_create_link(&dev->class->p->subsys.kobj, - &dev->kobj, dev_name(dev)); + error = sysfs_create_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev)); if (error) 
goto out_device; - - return 0; + goto exit; out_device: sysfs_remove_link(&dev->kobj, "device"); - out_subsys: sysfs_remove_link(&dev->kobj, "subsystem"); out_devnode: sysfs_remove_link(&dev->kobj, "of_node"); +exit: + subsys_put(sp); return error; } static void device_remove_class_symlinks(struct device *dev) { + struct subsys_private *sp = class_to_subsys(dev->class); + if (dev_of_node(dev)) sysfs_remove_link(&dev->kobj, "of_node"); - if (!dev->class) + if (!sp) return; if (dev->parent && device_is_not_partition(dev)) sysfs_remove_link(&dev->kobj, "device"); sysfs_remove_link(&dev->kobj, "subsystem"); -#ifdef CONFIG_BLOCK - if (sysfs_deprecated && dev->class == &block_class) - return; -#endif - sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev)); + sysfs_delete_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev)); + subsys_put(sp); } /** @@ -3270,27 +3495,13 @@ int dev_set_name(struct device *dev, const char *fmt, ...) } EXPORT_SYMBOL_GPL(dev_set_name); -/** - * device_to_dev_kobj - select a /sys/dev/ directory for the device - * @dev: device - * - * By default we select char/ for new entries. Setting class->dev_obj - * to NULL prevents an entry from being created. class->dev_kobj must - * be set (or cleared) before any devices are registered to the class - * otherwise device_create_sys_dev_entry() and - * device_remove_sys_dev_entry() will disagree about the presence of - * the link. - */ +/* select a /sys/dev/ directory for the device */ static struct kobject *device_to_dev_kobj(struct device *dev) { - struct kobject *kobj; - - if (dev->class) - kobj = dev->class->dev_kobj; + if (is_blockdev(dev)) + return sysfs_dev_block_kobj; else - kobj = sysfs_dev_char_kobj; - - return kobj; + return sysfs_dev_char_kobj; } static int device_create_sys_dev_entry(struct device *dev) @@ -3359,6 +3570,7 @@ static int device_private_init(struct device *dev) */ int device_add(struct device *dev) { + struct subsys_private *sp; struct device *parent; struct kobject *kobj; struct class_interface *class_intf; @@ -3381,18 +3593,19 @@ int device_add(struct device *dev) * the name, and force the use of dev_name() */ if (dev->init_name) { - dev_set_name(dev, "%s", dev->init_name); + error = dev_set_name(dev, "%s", dev->init_name); dev->init_name = NULL; } + if (dev_name(dev)) + error = 0; /* subsystems can specify simple device enumeration */ - if (!dev_name(dev) && dev->bus && dev->bus->dev_name) - dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); - - if (!dev_name(dev)) { + else if (dev->bus && dev->bus->dev_name) + error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); + else error = -EINVAL; + if (error) goto name_error; - } pr_debug("device: '%s': %s\n", dev_name(dev), __func__); @@ -3413,7 +3626,7 @@ int device_add(struct device *dev) /* we require the name to be set before, and pass NULL */ error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); if (error) { - glue_dir = get_glue_dir(dev); + glue_dir = kobj; goto Error; } @@ -3453,10 +3666,7 @@ int device_add(struct device *dev) /* Notify clients of device addition. This call must come * after dpm_sysfs_add() and before kobject_uevent(). 
*/ - if (dev->bus) - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, - BUS_NOTIFY_ADD_DEVICE, dev); - + bus_notify(dev, BUS_NOTIFY_ADD_DEVICE); kobject_uevent(&dev->kobj, KOBJ_ADD); /* @@ -3490,18 +3700,18 @@ int device_add(struct device *dev) klist_add_tail(&dev->p->knode_parent, &parent->p->klist_children); - if (dev->class) { - mutex_lock(&dev->class->p->mutex); + sp = class_to_subsys(dev->class); + if (sp) { + mutex_lock(&sp->mutex); /* tie the class to the device */ - klist_add_tail(&dev->p->knode_class, - &dev->class->p->klist_devices); + klist_add_tail(&dev->p->knode_class, &sp->klist_devices); /* notify any interfaces that the device is here */ - list_for_each_entry(class_intf, - &dev->class->p->interfaces, node) + list_for_each_entry(class_intf, &sp->interfaces, node) if (class_intf->add_dev) - class_intf->add_dev(dev, class_intf); - mutex_unlock(&dev->class->p->mutex); + class_intf->add_dev(dev); + mutex_unlock(&sp->mutex); + subsys_put(sp); } done: put_device(dev); @@ -3513,6 +3723,7 @@ done: device_pm_remove(dev); dpm_sysfs_remove(dev); DPMError: + device_set_driver(dev, NULL); bus_remove_device(dev); BusError: device_remove_attrs(dev); @@ -3620,6 +3831,7 @@ EXPORT_SYMBOL_GPL(kill_device); */ void device_del(struct device *dev) { + struct subsys_private *sp; struct device *parent = dev->parent; struct kobject *glue_dir = NULL; struct class_interface *class_intf; @@ -3636,9 +3848,7 @@ void device_del(struct device *dev) * before dpm_sysfs_remove(). */ noio_flag = memalloc_noio_save(); - if (dev->bus) - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, - BUS_NOTIFY_DEL_DEVICE, dev); + bus_notify(dev, BUS_NOTIFY_DEL_DEVICE); dpm_sysfs_remove(dev); if (parent) @@ -3648,18 +3858,20 @@ void device_del(struct device *dev) device_remove_sys_dev_entry(dev); device_remove_file(dev, &dev_attr_dev); } - if (dev->class) { + + sp = class_to_subsys(dev->class); + if (sp) { device_remove_class_symlinks(dev); - mutex_lock(&dev->class->p->mutex); + mutex_lock(&sp->mutex); /* notify any interfaces that the device is now gone */ - list_for_each_entry(class_intf, - &dev->class->p->interfaces, node) + list_for_each_entry(class_intf, &sp->interfaces, node) if (class_intf->remove_dev) - class_intf->remove_dev(dev, class_intf); + class_intf->remove_dev(dev); /* remove the device from the class list */ klist_del(&dev->p->knode_class); - mutex_unlock(&dev->class->p->mutex); + mutex_unlock(&sp->mutex); + subsys_put(sp); } device_remove_file(dev, &dev_attr_uevent); device_remove_attrs(dev); @@ -3669,9 +3881,18 @@ void device_del(struct device *dev) device_platform_notify_remove(dev); device_links_purge(dev); - if (dev->bus) - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, - BUS_NOTIFY_REMOVED_DEVICE, dev); + /* + * If a device does not have a driver attached, we need to clean + * up any managed resources. We do this in device_release(), but + * it's never called (and we leak the device) if a managed + * resource holds a reference to the device. So release all + * managed resources here, like we do in driver_detach(). We + * still need to do so again in device_release() in case someone + * adds a new resource after this point, though. + */ + devres_release_all(dev); + + bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE); kobject_uevent(&dev->kobj, KOBJ_REMOVE); glue_dir = get_glue_dir(dev); kobject_del(&dev->kobj); @@ -3739,7 +3960,7 @@ static struct device *next_device(struct klist_iter *i) * a name. This memory is returned in tmp and needs to be * freed by the caller. 
*/ -const char *device_get_devnode(struct device *dev, +const char *device_get_devnode(const struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp) { @@ -3764,18 +3985,17 @@ const char *device_get_devnode(struct device *dev, return dev_name(dev); /* replace '!' in the name with '/' */ - s = kstrdup(dev_name(dev), GFP_KERNEL); + s = kstrdup_and_replace(dev_name(dev), '!', '/', GFP_KERNEL); if (!s) return NULL; - strreplace(s, '!', '/'); return *tmp = s; } /** * device_for_each_child - device child iterator. * @parent: parent struct device. - * @fn: function to be called for each device. * @data: data for the callback. + * @fn: function to be called for each device. * * Iterate over @parent's child devices, and call @fn for each, * passing it @data. @@ -3784,13 +4004,13 @@ const char *device_get_devnode(struct device *dev, * other than 0, we break out and return that value. */ int device_for_each_child(struct device *parent, void *data, - int (*fn)(struct device *dev, void *data)) + device_iter_t fn) { struct klist_iter i; struct device *child; int error = 0; - if (!parent->p) + if (!parent || !parent->p) return 0; klist_iter_init(&parent->p->klist_children, &i); @@ -3804,8 +4024,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child); /** * device_for_each_child_reverse - device child iterator in reversed order. * @parent: parent struct device. - * @fn: function to be called for each device. * @data: data for the callback. + * @fn: function to be called for each device. * * Iterate over @parent's child devices, and call @fn for each, * passing it @data. @@ -3814,13 +4034,13 @@ EXPORT_SYMBOL_GPL(device_for_each_child); * other than 0, we break out and return that value. */ int device_for_each_child_reverse(struct device *parent, void *data, - int (*fn)(struct device *dev, void *data)) + device_iter_t fn) { struct klist_iter i; struct device *child; int error = 0; - if (!parent->p) + if (!parent || !parent->p) return 0; klist_iter_init(&parent->p->klist_children, &i); @@ -3832,87 +4052,77 @@ int device_for_each_child_reverse(struct device *parent, void *data, EXPORT_SYMBOL_GPL(device_for_each_child_reverse); /** - * device_find_child - device iterator for locating a particular device. - * @parent: parent struct device - * @match: Callback function to check device - * @data: Data to pass to match function + * device_for_each_child_reverse_from - device child iterator in reversed order. + * @parent: parent struct device. + * @from: optional starting point in child list + * @data: data for the callback. + * @fn: function to be called for each device. * - * This is similar to the device_for_each_child() function above, but it - * returns a reference to a device that is 'found' for later use, as - * determined by the @match callback. + * Iterate over @parent's child devices, starting at @from, and call @fn + * for each, passing it @data. This helper is identical to + * device_for_each_child_reverse() when @from is NULL. * - * The callback should return 0 if the device doesn't match and non-zero - * if it does. If the callback returns non-zero and a reference to the - * current device can be obtained, this function will return to the caller - * and not iterate over any more devices. - * - * NOTE: you will need to drop the reference with put_device() after use. + * @fn is checked each iteration. 
If it returns anything other than 0, + * iteration stop and that value is returned to the caller of + * device_for_each_child_reverse_from(); */ -struct device *device_find_child(struct device *parent, void *data, - int (*match)(struct device *dev, void *data)) +int device_for_each_child_reverse_from(struct device *parent, + struct device *from, void *data, + device_iter_t fn) { struct klist_iter i; struct device *child; + int error = 0; - if (!parent) - return NULL; + if (!parent || !parent->p) + return 0; - klist_iter_init(&parent->p->klist_children, &i); - while ((child = next_device(&i))) - if (match(child, data) && get_device(child)) - break; + klist_iter_init_node(&parent->p->klist_children, &i, + (from ? &from->p->knode_parent : NULL)); + while ((child = prev_device(&i)) && !error) + error = fn(child, data); klist_iter_exit(&i); - return child; + return error; } -EXPORT_SYMBOL_GPL(device_find_child); +EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from); /** - * device_find_child_by_name - device iterator for locating a child device. + * device_find_child - device iterator for locating a particular device. * @parent: parent struct device - * @name: name of the child device + * @data: Data to pass to match function + * @match: Callback function to check device * - * This is similar to the device_find_child() function above, but it - * returns a reference to a device that has the name @name. + * This is similar to the device_for_each_child() function above, but it + * returns a reference to a device that is 'found' for later use, as + * determined by the @match callback. + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero and a reference to the + * current device can be obtained, this function will return to the caller + * and not iterate over any more devices. * * NOTE: you will need to drop the reference with put_device() after use. */ -struct device *device_find_child_by_name(struct device *parent, - const char *name) +struct device *device_find_child(struct device *parent, const void *data, + device_match_t match) { struct klist_iter i; struct device *child; - if (!parent) + if (!parent || !parent->p) return NULL; klist_iter_init(&parent->p->klist_children, &i); - while ((child = next_device(&i))) - if (sysfs_streq(dev_name(child), name) && get_device(child)) + while ((child = next_device(&i))) { + if (match(child, data)) { + get_device(child); break; + } + } klist_iter_exit(&i); return child; } -EXPORT_SYMBOL_GPL(device_find_child_by_name); - -static int match_any(struct device *dev, void *unused) -{ - return 1; -} - -/** - * device_find_any_child - device iterator for locating a child device, if any. - * @parent: parent struct device - * - * This is similar to the device_find_child() function above, but it - * returns a reference to a child device, if any. - * - * NOTE: you will need to drop the reference with put_device() after use. 
- */ -struct device *device_find_any_child(struct device *parent) -{ - return device_find_child(parent, NULL, match_any); -} -EXPORT_SYMBOL_GPL(device_find_any_child); +EXPORT_SYMBOL_GPL(device_find_child); int __init devices_init(void) { @@ -3928,9 +4138,14 @@ int __init devices_init(void) sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); if (!sysfs_dev_char_kobj) goto char_kobj_err; + device_link_wq = alloc_workqueue("device_link_wq", WQ_PERCPU, 0); + if (!device_link_wq) + goto wq_err; return 0; + wq_err: + kobject_put(sysfs_dev_char_kobj); char_kobj_err: kobject_put(sysfs_dev_block_kobj); block_kobj_err: @@ -4124,7 +4339,7 @@ static void device_create_release(struct device *dev) } static __printf(6, 0) struct device * -device_create_groups_vargs(struct class *class, struct device *parent, +device_create_groups_vargs(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) @@ -4184,11 +4399,8 @@ error: * pointer. * * Returns &struct device pointer on success, or ERR_PTR() on error. - * - * Note: the struct class passed to this function must have previously - * been created with a call to class_create(). */ -struct device *device_create(struct class *class, struct device *parent, +struct device *device_create(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) { va_list vargs; @@ -4225,11 +4437,8 @@ EXPORT_SYMBOL_GPL(device_create); * pointer. * * Returns &struct device pointer on success, or ERR_PTR() on error. - * - * Note: the struct class passed to this function must have previously - * been created with a call to class_create(). */ -struct device *device_create_with_groups(struct class *class, +struct device *device_create_with_groups(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, @@ -4254,7 +4463,7 @@ EXPORT_SYMBOL_GPL(device_create_with_groups); * This call unregisters and cleans up a device that was created with a * call to device_create(). */ -void device_destroy(struct class *class, dev_t devt) +void device_destroy(const struct class *class, dev_t devt) { struct device *dev; @@ -4276,9 +4485,12 @@ EXPORT_SYMBOL_GPL(device_destroy); * on the same device to ensure that new_name is valid and * won't conflict with other devices. * - * Note: Don't call this function. Currently, the networking layer calls this - * function, but that will change. The following text from Kay Sievers offers - * some insight: + * Note: given that some subsystems (networking and infiniband) use this + * function, with no immediate plans for this to change, we cannot assume or + * require that this function not be called at all. + * + * However, if you're writing new code, do not call this function. The following + * text from Kay Sievers offers some insight: * * Renaming devices is racy at many levels, symlinks and other stuff are not * replaced atomically, and you get a "move" uevent, but it's not easy to @@ -4292,13 +4504,6 @@ EXPORT_SYMBOL_GPL(device_destroy); * kernel device renaming. Besides that, it's not even implemented now for * other things than (driver-core wise very simple) network devices. * - * We are currently about to change network renaming in udev to completely - * disallow renaming of devices in the same namespace as the kernel uses, - * because we can't solve the problems properly, that arise with swapping names - * of multiple interfaces without races. 
Means, renaming of eth[0-9]* will only - * be allowed to some other name than eth[0-9]*, for the aforementioned - * reasons. - * * Make up a "real" name in the driver before you register anything, or add * some other attributes for userspace to find the device, or use udev to add * symlinks -- but never rename kernel devices later, it's a complete mess. We @@ -4307,9 +4512,11 @@ EXPORT_SYMBOL_GPL(device_destroy); */ int device_rename(struct device *dev, const char *new_name) { + struct subsys_private *sp = NULL; struct kobject *kobj = &dev->kobj; char *old_device_name = NULL; int error; + bool is_link_renamed = false; dev = get_device(dev); if (!dev) @@ -4324,18 +4531,28 @@ int device_rename(struct device *dev, const char *new_name) } if (dev->class) { - error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, - kobj, old_device_name, + sp = class_to_subsys(dev->class); + + if (!sp) { + error = -EINVAL; + goto out; + } + + error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, new_name, kobject_namespace(kobj)); if (error) goto out; + + is_link_renamed = true; } error = kobject_rename(kobj, new_name); - if (error) - goto out; - out: + if (error && is_link_renamed) + sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name, + old_device_name, kobject_namespace(kobj)); + subsys_put(sp); + put_device(dev); kfree(old_device_name); @@ -4451,7 +4668,7 @@ static int device_attrs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { struct kobject *kobj = &dev->kobj; - struct class *class = dev->class; + const struct class *class = dev->class; const struct device_type *type = dev->type; int error; @@ -4509,6 +4726,7 @@ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { int error; struct kobject *kobj = &dev->kobj; + struct subsys_private *sp; dev = get_device(dev); if (!dev) @@ -4545,21 +4763,19 @@ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) if (error) goto out; -#ifdef CONFIG_BLOCK - if (sysfs_deprecated && dev->class == &block_class) - goto out; -#endif - /* * Change the owner of the symlink located in the class directory of * the device class associated with @dev which points to the actual * directory entry for @dev to @kuid/@kgid. This ensures that the * symlink shows the same permissions as its target. */ - error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj, - dev_name(dev), kuid, kgid); - if (error) + sp = class_to_subsys(dev->class); + if (!sp) { + error = -EINVAL; goto out; + } + error = sysfs_link_change_owner(&sp->subsys.kobj, &dev->kobj, dev_name(dev), kuid, kgid); + subsys_put(sp); out: put_device(dev); @@ -4658,7 +4874,7 @@ set_dev_info(const struct device *dev, struct dev_printk_info *dev_info) else return; - strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem)); + strscpy(dev_info->subsystem, subsys); /* * Add device identifier DEVICE=: @@ -4768,6 +4984,49 @@ define_dev_printk_level(_dev_info, KERN_INFO); #endif +static void __dev_probe_failed(const struct device *dev, int err, bool fatal, + const char *fmt, va_list vargsp) +{ + struct va_format vaf; + va_list vargs; + + /* + * On x86_64 and possibly on other architectures, va_list is actually a + * size-1 array containing a structure. As a result, function parameter + * vargsp decays from T[1] to T*, and &vargsp has type T** rather than + * T(*)[1], which is expected by its assignment to vaf.va below. 
+ * + * One standard way to solve this mess is by creating a copy in a local + * variable of type va_list and then using a pointer to that local copy + * instead, which is the approach employed here. + */ + va_copy(vargs, vargsp); + + vaf.fmt = fmt; + vaf.va = &vargs; + + switch (err) { + case -EPROBE_DEFER: + device_set_deferred_probe_reason(dev, &vaf); + dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf); + break; + + case -ENOMEM: + /* Don't print anything on -ENOMEM, there's already enough output */ + break; + + default: + /* Log fatal final failures as errors, otherwise produce warnings */ + if (fatal) + dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf); + else + dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf); + break; + } + + va_end(vargs); +} + /** * dev_err_probe - probe error check and log helper * @dev: the pointer to the struct device @@ -4780,7 +5039,7 @@ define_dev_printk_level(_dev_info, KERN_INFO); * -EPROBE_DEFER and propagate error upwards. * In case of -EPROBE_DEFER it sets also defer probe reason, which can be * checked later by reading devices_deferred debugfs attribute. - * It replaces code sequence:: + * It replaces the following code sequence:: * * if (err != -EPROBE_DEFER) * dev_err(dev, ...); @@ -4792,36 +5051,78 @@ define_dev_printk_level(_dev_info, KERN_INFO); * * return dev_err_probe(dev, err, ...); * - * Note that it is deemed acceptable to use this function for error - * prints during probe even if the @err is known to never be -EPROBE_DEFER. + * Using this helper in your probe function is totally fine even if @err + * is known to never be -EPROBE_DEFER. * The benefit compared to a normal dev_err() is the standardized format - * of the error code and the fact that the error code is returned. + * of the error code, which is emitted symbolically (i.e. you get "EAGAIN" + * instead of "-35"), and having the error code returned allows more + * compact error paths. * * Returns @err. - * */ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) { - struct va_format vaf; - va_list args; + va_list vargs; - va_start(args, fmt); - vaf.fmt = fmt; - vaf.va = &args; + va_start(vargs, fmt); - if (err != -EPROBE_DEFER) { - dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf); - } else { - device_set_deferred_probe_reason(dev, &vaf); - dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf); - } + /* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */ + __dev_probe_failed(dev, err, true, fmt, vargs); - va_end(args); + va_end(vargs); return err; } EXPORT_SYMBOL_GPL(dev_err_probe); +/** + * dev_warn_probe - probe error check and log helper + * @dev: the pointer to the struct device + * @err: error value to test + * @fmt: printf-style format string + * @...: arguments as specified in the format string + * + * This helper implements common pattern present in probe functions for error + * checking: print debug or warning message depending if the error value is + * -EPROBE_DEFER and propagate error upwards. + * In case of -EPROBE_DEFER it sets also defer probe reason, which can be + * checked later by reading devices_deferred debugfs attribute. + * It replaces the following code sequence:: + * + * if (err != -EPROBE_DEFER) + * dev_warn(dev, ...); + * else + * dev_dbg(dev, ...); + * return err; + * + * with:: + * + * return dev_warn_probe(dev, err, ...); + * + * Using this helper in your probe function is totally fine even if @err + * is known to never be -EPROBE_DEFER. 
+ * The benefit compared to a normal dev_warn() is the standardized format + * of the error code, which is emitted symbolically (i.e. you get "EAGAIN" + * instead of "-35"), and having the error code returned allows more + * compact error paths. + * + * Returns @err. + */ +int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...) +{ + va_list vargs; + + va_start(vargs, fmt); + + /* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */ + __dev_probe_failed(dev, err, false, fmt, vargs); + + va_end(vargs); + + return err; +} +EXPORT_SYMBOL_GPL(dev_warn_probe); + static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) { return fwnode && !IS_ERR(fwnode->secondary); @@ -4858,9 +5159,13 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } else { if (fwnode_is_primary(fn)) { dev->fwnode = fn->secondary; + + /* Skip nullifying fn->secondary if the primary is shared */ + if (parent && fn == parent->fwnode) + return; + /* Set fn->secondary = NULL, so fn remains the primary fwnode */ - if (!(parent && fn == parent->fwnode)) - fn->secondary = NULL; + fn->secondary = NULL; } else { dev->fwnode = NULL; } @@ -4890,6 +5195,67 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) EXPORT_SYMBOL_GPL(set_secondary_fwnode); /** + * device_remove_of_node - Remove an of_node from a device + * @dev: device whose device tree node is being removed + */ +void device_remove_of_node(struct device *dev) +{ + dev = get_device(dev); + if (!dev) + return; + + if (!dev->of_node) + goto end; + + if (dev->fwnode == of_fwnode_handle(dev->of_node)) + dev->fwnode = NULL; + + of_node_put(dev->of_node); + dev->of_node = NULL; + +end: + put_device(dev); +} +EXPORT_SYMBOL_GPL(device_remove_of_node); + +/** + * device_add_of_node - Add an of_node to an existing device + * @dev: device whose device tree node is being added + * @of_node: of_node to add + * + * Return: 0 on success or error code on failure. + */ +int device_add_of_node(struct device *dev, struct device_node *of_node) +{ + int ret; + + if (!of_node) + return -EINVAL; + + dev = get_device(dev); + if (!dev) + return -EINVAL; + + if (dev->of_node) { + dev_err(dev, "Cannot replace node %pOF with %pOF\n", + dev->of_node, of_node); + ret = -EBUSY; + goto end; + } + + dev->of_node = of_node_get(of_node); + + if (!dev->fwnode) + dev->fwnode = of_fwnode_handle(of_node); + + ret = 0; +end: + put_device(dev); + return ret; +} +EXPORT_SYMBOL_GPL(device_add_of_node); + +/** * device_set_of_node_from_dev - reuse device-tree node of another device * @dev: device whose device-tree node is being set * @dev2: device whose device-tree node is being reused @@ -4912,21 +5278,52 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode) } EXPORT_SYMBOL_GPL(device_set_node); +/** + * get_dev_from_fwnode - Obtain a reference count of the struct device the + * struct fwnode_handle is associated with. + * @fwnode: The pointer to the struct fwnode_handle to obtain the struct device + * reference count of. + * + * This function obtains a reference count of the device the device pointer + * embedded in the struct fwnode_handle points to. + * + * Note that the struct device pointer embedded in struct fwnode_handle does + * *not* have a reference count of the struct device itself. + * + * Hence, it is a UAF (and thus a bug) to call this function if the caller can't + * guarantee that the last reference count of the corresponding struct device is + * not dropped concurrently. 
+ * + * This is possible since struct fwnode_handle has its own reference count and + * hence can out-live the struct device it is associated with. + */ +struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode) +{ + return get_device((fwnode)->dev); +} +EXPORT_SYMBOL_GPL(get_dev_from_fwnode); + int device_match_name(struct device *dev, const void *name) { return sysfs_streq(dev_name(dev), name); } EXPORT_SYMBOL_GPL(device_match_name); +int device_match_type(struct device *dev, const void *type) +{ + return dev->type == type; +} +EXPORT_SYMBOL_GPL(device_match_type); + int device_match_of_node(struct device *dev, const void *np) { - return dev->of_node == np; + return np && dev->of_node == np; } EXPORT_SYMBOL_GPL(device_match_of_node); int device_match_fwnode(struct device *dev, const void *fwnode) { - return dev_fwnode(dev) == fwnode; + return fwnode && dev_fwnode(dev) == fwnode; } EXPORT_SYMBOL_GPL(device_match_fwnode); @@ -4938,13 +5335,13 @@ EXPORT_SYMBOL_GPL(device_match_devt); int device_match_acpi_dev(struct device *dev, const void *adev) { - return ACPI_COMPANION(dev) == adev; + return adev && ACPI_COMPANION(dev) == adev; } EXPORT_SYMBOL(device_match_acpi_dev); int device_match_acpi_handle(struct device *dev, const void *handle) { - return ACPI_HANDLE(dev) == handle; + return handle && ACPI_HANDLE(dev) == handle; } EXPORT_SYMBOL(device_match_acpi_handle); |
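Editor's note, not part of the patch above: the reworked device_find_child() now takes a const data pointer and a device_match_t callback, and the exported device_match_*() helpers (such as device_match_name()) plug straight into it. A minimal caller sketch under those assumptions; the helper name and the "uart0" string are invented for illustration:

#include <linux/device.h>

/*
 * Look up a child of @parent named "uart0" with the generic
 * device_find_child() plus the exported device_match_name() matcher.
 * On a match the core takes a reference before returning, so the
 * caller must balance it with put_device().
 */
static struct device *example_get_uart_child(struct device *parent)
{
	struct device *child;

	child = device_find_child(parent, "uart0", device_match_name);
	if (!child)
		return NULL;		/* no such child registered */

	/* ... use the child ... */

	return child;			/* caller drops it with put_device() */
}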
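Similarly, the child iterators now take a device_iter_t callback and, per the added checks, tolerate a parent that is NULL or not yet added. A small counting-iterator sketch (all names invented):

#include <linux/device.h>

/* device_iter_t callback: count every child; returning 0 keeps iterating */
static int example_count_child(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static unsigned int example_count_children(struct device *parent)
{
	unsigned int count = 0;

	/* Safe even if @parent is NULL or was never added, per the patch */
	device_for_each_child(parent, &count, example_count_child);

	return count;
}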
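Finally, the dev_err_probe()/dev_warn_probe() kernel-doc above describes the intended call pattern; here is a hedged sketch of a hypothetical platform driver probe (the "vdd" supply name and message are invented). dev_warn_probe() is used the same way for failures the driver only wants to warn about:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regulator *vdd;

	vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(vdd))
		/*
		 * Logs "error %pe: ..." via dev_err() for real failures,
		 * records the deferral reason and logs via dev_dbg() for
		 * -EPROBE_DEFER, stays silent for -ENOMEM, and returns the
		 * error code, so the whole error path is one statement.
		 */
		return dev_err_probe(dev, PTR_ERR(vdd),
				     "failed to get vdd supply\n");

	return 0;
}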