Diffstat (limited to 'drivers/base')
-rw-r--r--   drivers/base/auxiliary.c               2
-rw-r--r--   drivers/base/cacheinfo.c              50
-rw-r--r--   drivers/base/core.c                   81
-rw-r--r--   drivers/base/devcoredump.c             2
-rw-r--r--   drivers/base/firmware_loader/sysfs.c   6
-rw-r--r--   drivers/base/memory.c                 21
-rw-r--r--   drivers/base/node.c                  123
-rw-r--r--   drivers/base/power/main.c             16
-rw-r--r--   drivers/base/power/runtime.c           6
-rw-r--r--   drivers/base/topology.c                2
10 files changed, 181 insertions, 128 deletions
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 44cd3f85b659..12ffdd843756 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -399,6 +399,7 @@ static void auxiliary_device_release(struct device *dev)
{
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
kfree(auxdev);
}
@@ -435,6 +436,7 @@ struct auxiliary_device *auxiliary_device_create(struct device *dev,
ret = auxiliary_device_init(auxdev);
if (ret) {
+ of_node_put(auxdev->dev.of_node);
kfree(auxdev);
return NULL;
}
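
[Note] The two added of_node_put() calls balance the of_node reference that auxiliary_device_create() presumably takes when it attaches a device-tree node to the new device; without them the node refcount would leak on release or on a failed init. A minimal sketch of the pattern, assuming the get happens on the create side (my_bind_of_node() is a hypothetical helper, not part of this patch):

    #include <linux/auxiliary_bus.h>
    #include <linux/of.h>

    static void my_bind_of_node(struct auxiliary_device *auxdev,
                                struct device_node *np)
    {
        auxdev->dev.of_node = of_node_get(np);    /* +1 when the node is attached */
    }

    static void my_release(struct device *dev)
    {
        struct auxiliary_device *auxdev = to_auxiliary_dev(dev);

        of_node_put(dev->of_node);                /* -1, matches the get above */
        kfree(auxdev);
    }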
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf0d455209d7..613410705a47 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -183,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
return of_property_read_bool(np, "cache-unified");
}
+static bool match_cache_node(struct device_node *cpu,
+ const struct device_node *cache_node)
+{
+ struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+ while (cache) {
+ if (cache == cache_node) {
+ of_node_put(cache);
+ return true;
+ }
+
+ prev = cache;
+ cache = of_find_next_cache_node(cache);
+ of_node_put(prev);
+ }
+
+ return false;
+}
+
+#ifndef arch_compact_of_hwid
+#define arch_compact_of_hwid(_x) (_x)
+#endif
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+ struct device_node *cache_node)
+{
+ struct device_node *cpu;
+ u32 min_id = ~0;
+
+ for_each_of_cpu_node(cpu) {
+ u64 id = of_get_cpu_hwid(cpu, 0);
+
+ id = arch_compact_of_hwid(id);
+ if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+ of_node_put(cpu);
+ return;
+ }
+
+ if (match_cache_node(cpu, cache_node))
+ min_id = min(min_id, id);
+ }
+
+ if (min_id != ~0) {
+ this_leaf->id = min_id;
+ this_leaf->attributes |= CACHE_ID;
+ }
+}
+
static void cache_of_set_props(struct cacheinfo *this_leaf,
struct device_node *np)
{
@@ -198,6 +247,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
cache_get_line_size(this_leaf, np);
cache_nr_sets(this_leaf, np);
cache_associativity(this_leaf);
+ cache_of_set_id(this_leaf, np);
}
static int cache_setup_of_node(unsigned int cpu)
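
[Note] cache_of_set_id() derives a stable cache id from the device tree by walking every CPU node, following its next-level-cache chain in match_cache_node(), and keeping the smallest CPU hardware id that shares the cache; if any compacted hwid still has bits above bit 31 set, the assignment is abandoned. arch_compact_of_hwid() defaults to the identity mapping, so an architecture with sparse 64-bit hwids would override it. A purely hypothetical override for illustration (the field layout here is invented, not any real architecture's scheme):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    /* Hypothetical: pack two sparse 16-bit affinity fields of a 64-bit hwid
     * into the low 32 bits so FIELD_GET(GENMASK_ULL(63, 32), id) reads zero. */
    static u64 my_compact_of_hwid(u64 hwid)
    {
        u32 hi = FIELD_GET(GENMASK_ULL(47, 32), hwid);   /* upper affinity field */
        u32 lo = FIELD_GET(GENMASK_ULL(15, 0), hwid);    /* lower affinity field */

        return ((u64)hi << 16) | lo;
    }
    #define arch_compact_of_hwid(x) my_compact_of_hwid(x)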
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cbc0099d8ef2..d22d6b23e758 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -460,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev,
struct device_link *link = to_devlink(dev);
const char *output;
- if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
output = "supplier unbind";
- else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
output = "consumer unbind";
else
output = "never";
@@ -476,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);
@@ -485,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n",
- !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);
@@ -792,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
- if (link->flags & DL_FLAG_INFERRED &&
+ if (device_link_test(link, DL_FLAG_INFERRED) &&
!(flags & DL_FLAG_INFERRED))
link->flags &= ~DL_FLAG_INFERRED;
if (flags & DL_FLAG_PM_RUNTIME) {
- if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
link->flags |= DL_FLAG_PM_RUNTIME;
}
@@ -807,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer,
if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
- !(link->flags & DL_FLAG_STATELESS)) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
+ !device_link_test(link, DL_FLAG_STATELESS)) {
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
@@ -823,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer,
* update the existing link to stay around longer.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
}
@@ -831,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer,
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
- if (!(link->flags & DL_FLAG_MANAGED)) {
+ if (!device_link_test(link, DL_FLAG_MANAGED)) {
kref_get(&link->kref);
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
goto reorder;
@@ -940,7 +939,7 @@ static void __device_link_del(struct kref *kref)
static void device_link_put_kref(struct device_link *link)
{
- if (link->flags & DL_FLAG_STATELESS)
+ if (device_link_test(link, DL_FLAG_STATELESS))
kref_put(&link->kref, __device_link_del);
else if (!device_is_registered(link->consumer))
__device_link_del(&link->kref);
@@ -1004,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1072,14 +1071,14 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE &&
- !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
!link->supplier->can_match) {
ret = -EAGAIN;
continue;
@@ -1128,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev,
return;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_ACTIVE)
return;
@@ -1268,7 +1267,7 @@ void device_links_force_bind(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -1329,7 +1328,7 @@ void device_links_driver_bound(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1345,7 +1344,7 @@ void device_links_driver_bound(struct device *dev)
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
- if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+ if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
driver_deferred_probe_add(link->consumer);
}
@@ -1357,11 +1356,11 @@ void device_links_driver_bound(struct device *dev)
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
struct device *supplier;
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
supplier = link->supplier;
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
/*
* When DL_FLAG_SYNC_STATE_ONLY is set, it means no
* other DL_MANAGED_LINK_FLAGS have been set. So, it's
@@ -1369,7 +1368,7 @@ void device_links_driver_bound(struct device *dev)
*/
device_link_drop_managed(link);
} else if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
link->status != DL_STATE_CONSUMER_PROBE &&
!link->supplier->can_match) {
/*
@@ -1421,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev)
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
device_link_drop_managed(link);
continue;
}
@@ -1436,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1461,7 +1460,7 @@ void device_links_no_driver(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1498,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+ WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
/*
@@ -1510,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev)
* has moved to DL_STATE_SUPPLIER_UNBIND.
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
- link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
device_link_drop_managed(link);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
@@ -1544,7 +1543,7 @@ bool device_links_busy(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
@@ -1586,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (!(link->flags & DL_FLAG_MANAGED) ||
- link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
+ device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
continue;
status = link->status;
@@ -1743,7 +1742,7 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
static void fw_devlink_relax_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_INFERRED))
+ if (!device_link_test(link, DL_FLAG_INFERRED))
return;
if (device_link_flag_is_sync_state_only(link->flags))
@@ -1779,7 +1778,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
- if (!(link->flags & DL_FLAG_MANAGED) ||
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
link->status == DL_STATE_ACTIVE || sup->state_synced ||
!dev_has_sync_state(sup))
return 0;
@@ -1881,8 +1880,6 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
-#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
-
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
struct device *dev;
@@ -2063,7 +2060,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
* such due to a cycle.
*/
if (device_link_flag_is_sync_state_only(dev_link->flags) &&
- !(dev_link->flags & DL_FLAG_CYCLE))
+ !device_link_test(dev_link, DL_FLAG_CYCLE))
continue;
if (__fw_devlink_relax_cycles(con_handle,
@@ -5281,6 +5278,12 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(device_set_node);
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
+{
+ return get_device((fwnode)->dev);
+}
+EXPORT_SYMBOL_GPL(get_dev_from_fwnode);
+
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
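
[Note] The bulk of the core.c churn replaces open-coded `link->flags & DL_FLAG_*` tests with device_link_test(). Judging from the call sites, where it is used both in boolean context and as a %d argument to sysfs_emit(), it is presumably a trivial bool-returning helper along these lines (a sketch inferred from usage, not quoted from the header):

    /* Presumed shape of the helper used throughout this patch. */
    static inline bool device_link_test(const struct device_link *link, u32 flags)
    {
        return !!(link->flags & flags);
    }

    /* Typical conversion, before and after:
     *   if (link->flags & DL_FLAG_MANAGED) ...
     *   if (device_link_test(link, DL_FLAG_MANAGED)) ...
     */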
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 03a39c417dc4..37faf6156d7c 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -140,7 +140,7 @@ static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
};
static const struct attribute_group devcd_dev_group = {
- .bin_attrs_new = devcd_dev_bin_attrs,
+ .bin_attrs = devcd_dev_bin_attrs,
};
static const struct attribute_group *devcd_dev_groups[] = {
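
[Note] The `.bin_attrs_new` to `.bin_attrs` renames here and in firmware_loader, node.c and topology.c look like the tail end of the sysfs conversion to const `struct bin_attribute` arrays: once `.bin_attrs` itself took the const-qualified type, the transitional `_new` member could be dropped. A minimal group definition using the final field name (the attribute names are hypothetical):

    static const struct bin_attribute *const my_bin_attrs[] = {
        &bin_attr_my_blob,    /* hypothetical bin_attribute defined elsewhere */
        NULL,
    };

    static const struct attribute_group my_group = {
        .bin_attrs = my_bin_attrs,
    };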
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index d254ceb56d84..add0b9b75edd 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -359,8 +359,8 @@ out:
static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
- .read_new = firmware_data_read,
- .write_new = firmware_data_write,
+ .read = firmware_data_read,
+ .write = firmware_data_write,
};
static struct attribute *fw_dev_attrs[] = {
@@ -381,7 +381,7 @@ static const struct bin_attribute *const fw_dev_bin_attrs[] = {
static const struct attribute_group fw_dev_attr_group = {
.attrs = fw_dev_attrs,
- .bin_attrs_new = fw_dev_bin_attrs,
+ .bin_attrs = fw_dev_bin_attrs,
#ifdef CONFIG_FW_UPLOAD
.is_visible = fw_upload_is_visible,
#endif
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index ed3e69dc785c..5c6c1d6bb59f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -22,6 +22,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>
+#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -48,22 +49,8 @@ int mhp_online_type_from_str(const char *str)
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
-static int sections_per_block;
-
-static inline unsigned long memory_block_id(unsigned long section_nr)
-{
- return section_nr / sections_per_block;
-}
-
-static inline unsigned long pfn_to_block_id(unsigned long pfn)
-{
- return memory_block_id(pfn_to_section_nr(pfn));
-}
-
-static inline unsigned long phys_to_block_id(unsigned long phys)
-{
- return pfn_to_block_id(PFN_DOWN(phys));
-}
+int sections_per_block;
+EXPORT_SYMBOL(sections_per_block);
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -683,7 +670,7 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
*
* Called under device_hotplug_lock.
*/
-static struct memory_block *find_memory_block_by_id(unsigned long block_id)
+struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
struct memory_block *mem;
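
[Note] memory.c stops keeping its block bookkeeping private: sections_per_block is exported, find_memory_block_by_id() loses its `static`, and the inline converters (memory_block_id(), pfn_to_block_id(), phys_to_block_id()) disappear from this file, presumably moving to a shared header so node.c below can translate memblock ranges into block ids. A sketch of how an outside user could now resolve a physical address, assuming phys_to_block_id() remains available from that header:

    /* Sketch only: does a memory block exist for this physical address?
     * Caller is expected to hold device_hotplug_lock, as documented above
     * find_memory_block_by_id(). */
    static bool my_phys_has_block(u64 phys)
    {
        struct memory_block *mem = find_memory_block_by_id(phys_to_block_id(phys));

        if (!mem)
            return false;

        put_device(&mem->dev);    /* find_memory_block_by_id() takes a reference */
        return true;
    }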
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c19094481630..3399594136b2 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -21,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
static const struct bus_type node_subsys = {
.name = "node",
@@ -111,6 +112,27 @@ static const struct attribute_group *node_access_node_groups[] = {
NULL,
};
+#ifdef CONFIG_MEMORY_HOTPLUG
+static BLOCKING_NOTIFIER_HEAD(node_chain);
+
+int register_node_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&node_chain, nb);
+}
+EXPORT_SYMBOL(register_node_notifier);
+
+void unregister_node_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&node_chain, nb);
+}
+EXPORT_SYMBOL(unregister_node_notifier);
+
+int node_notify(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(&node_chain, val, v);
+}
+#endif
+
static void node_remove_accesses(struct node *node)
{
struct node_access_nodes *c, *cnext;
@@ -478,7 +500,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, 0UL,
- nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, 0UL,
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
nid, K(sreclaimable + sunreclaimable),
@@ -597,7 +619,7 @@ static const struct bin_attribute *node_dev_bin_attrs[] = {
static const struct attribute_group node_dev_group = {
.attrs = node_dev_attrs,
- .bin_attrs_new = node_dev_bin_attrs,
+ .bin_attrs = node_dev_bin_attrs,
};
static const struct attribute_group *node_dev_groups[] = {
@@ -637,6 +659,7 @@ static int register_node(struct node *node, int num)
} else {
hugetlb_register_node(node);
compaction_register_node(node);
+ reclaim_register_node(node);
}
return error;
@@ -653,6 +676,7 @@ void unregister_node(struct node *node)
{
hugetlb_unregister_node(node);
compaction_unregister_node(node);
+ reclaim_unregister_node(node);
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
@@ -756,15 +780,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-static int __ref get_nid_for_pfn(unsigned long pfn)
-{
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
- if (system_state < SYSTEM_RUNNING)
- return early_pfn_to_nid(pfn);
-#endif
- return pfn_to_nid(pfn);
-}
-
static void do_register_memory_block_under_node(int nid,
struct memory_block *mem_blk,
enum meminit_context context)
@@ -791,46 +806,6 @@ static void do_register_memory_block_under_node(int nid,
ret);
}
-/* register memory section under specified node if it spans that node */
-static int register_mem_block_under_node_early(struct memory_block *mem_blk,
- void *arg)
-{
- unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
- unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
- int nid = *(int *)arg;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
- int page_nid;
-
- /*
- * memory block could have several absent sections from start.
- * skip pfn range from absent section
- */
- if (!pfn_in_present_section(pfn)) {
- pfn = round_down(pfn + PAGES_PER_SECTION,
- PAGES_PER_SECTION) - 1;
- continue;
- }
-
- /*
- * We need to check if page belongs to nid only at the boot
- * case because node's ranges can be interleaved.
- */
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
-
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
- return 0;
- }
- /* mem section does not span the specified node */
- return 0;
-}
-
/*
* During hotplug we know that all pages in the memory block belong to the same
* node.
@@ -859,24 +834,44 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+/* register all memory blocks under the corresponding nodes */
+static void register_memory_blocks_under_nodes(void)
{
- walk_memory_blocks_func_t func;
+ struct memblock_region *r;
+
+ for_each_mem_region(r) {
+ const unsigned long start_block_id = phys_to_block_id(r->base);
+ const unsigned long end_block_id = phys_to_block_id(r->base + r->size - 1);
+ const int nid = memblock_get_region_node(r);
+ unsigned long block_id;
- if (context == MEMINIT_HOTPLUG)
- func = register_mem_block_under_node_hotplug;
- else
- func = register_mem_block_under_node_early;
+ if (!node_online(nid))
+ continue;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ struct memory_block *mem;
+
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ do_register_memory_block_under_node(nid, mem, MEMINIT_EARLY);
+ put_device(&mem->dev);
+ }
+ }
+}
+
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn)
+{
walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
- (void *)&nid, func);
+ (void *)&nid, register_mem_block_under_node_hotplug);
return;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-int __register_one_node(int nid)
+int register_one_node(int nid)
{
int error;
int cpu;
@@ -980,11 +975,13 @@ void __init node_dev_init(void)
/*
* Create all node devices, which will properly link the node
- * to applicable memory block devices and already created cpu devices.
+ * to already created cpu devices.
*/
for_each_online_node(i) {
- ret = __register_one_node(i);
+ ret = register_one_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
+
+ register_memory_blocks_under_nodes();
}
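
[Note] node.c gains its own notifier chain (register_node_notifier()/node_notify()), registers reclaim alongside hugetlb and compaction for each node, and replaces the early per-pfn node scan with a memblock-region walk that maps whole memory blocks to their node. A hypothetical consumer of the new notifier chain (the specific NODE_* action codes are not visible in this hunk, so the callback treats the action symbolically):

    #include <linux/node.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int my_node_event(struct notifier_block *nb, unsigned long action,
                             void *data)
    {
        /* 'action' and 'data' describe a node memory hotplug event; their
         * exact meaning comes from header changes not shown in this diff. */
        pr_info("node notifier event %lu\n", action);
        return NOTIFY_OK;
    }

    static struct notifier_block my_node_nb = {
        .notifier_call = my_node_event,
    };

    static int __init my_init(void)
    {
        return register_node_notifier(&my_node_nb);
    }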
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8aa06d59a2ee..dbf5456cd891 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -66,6 +66,20 @@ static pm_message_t pm_transition;
static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
+/**
+ * pm_hibernate_is_recovering - if recovering from hibernate due to error.
+ *
+ * Used to query if dev_pm_ops.thaw() is called for normal hibernation case or
+ * recovering from some error.
+ *
+ * Return: true for error case, false for normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+ return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
static const char *pm_verb(int event)
{
switch (event) {
@@ -2052,7 +2066,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
idx = device_links_read_lock();
list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
if (!dev_pm_smart_suspend(link->supplier) &&
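
[Note] pm_hibernate_is_recovering() lets a driver's hibernation callbacks distinguish the normal thaw, issued after the hibernation image has been created, from a thaw that is unwinding a failed hibernation (pm_transition.event == PM_EVENT_RECOVER). A hypothetical .thaw() callback using it; both helpers are invented for illustration:

    #include <linux/pm.h>

    static int my_thaw(struct device *dev)
    {
        if (pm_hibernate_is_recovering())
            /* Hibernation failed part-way; bring the device fully back. */
            return my_full_resume(dev);      /* hypothetical helper */

        /* Normal thaw: the image was created, only light re-init is needed. */
        return my_light_thaw(dev);           /* hypothetical helper */
    }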
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index f835b85bd4d9..3e84dc4122de 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -302,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev)
device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -1905,7 +1905,7 @@ void pm_runtime_get_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
- if (link->flags & DL_FLAG_PM_RUNTIME) {
+ if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
}
@@ -1959,7 +1959,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
*/
void pm_runtime_drop_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8b42df05feff..c890e2a5b428 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -179,7 +179,7 @@ static umode_t topology_is_visible(struct kobject *kobj,
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
- .bin_attrs_new = bin_attrs,
+ .bin_attrs = bin_attrs,
.is_visible = topology_is_visible,
.name = "topology"
};