Diffstat (limited to 'drivers/base/cacheinfo.c')
-rw-r--r-- | drivers/base/cacheinfo.c | 79
1 file changed, 63 insertions, 16 deletions
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 7a7609298e18..613410705a47 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,6 +8,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/acpi.h>
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/cacheinfo.h>
 #include <linux/compiler.h>
@@ -58,7 +59,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
 {
 	struct cacheinfo *llc;
 
-	if (!cache_leaves(cpu))
+	if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
 		return false;
 
 	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -183,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
 	return of_property_read_bool(np, "cache-unified");
 }
 
+static bool match_cache_node(struct device_node *cpu,
+			     const struct device_node *cache_node)
+{
+	struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+	while (cache) {
+		if (cache == cache_node) {
+			of_node_put(cache);
+			return true;
+		}
+
+		prev = cache;
+		cache = of_find_next_cache_node(cache);
+		of_node_put(prev);
+	}
+
+	return false;
+}
+
+#ifndef arch_compact_of_hwid
+#define arch_compact_of_hwid(_x)	(_x)
+#endif
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+			    struct device_node *cache_node)
+{
+	struct device_node *cpu;
+	u32 min_id = ~0;
+
+	for_each_of_cpu_node(cpu) {
+		u64 id = of_get_cpu_hwid(cpu, 0);
+
+		id = arch_compact_of_hwid(id);
+		if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+			of_node_put(cpu);
+			return;
+		}
+
+		if (match_cache_node(cpu, cache_node))
+			min_id = min(min_id, id);
+	}
+
+	if (min_id != ~0) {
+		this_leaf->id = min_id;
+		this_leaf->attributes |= CACHE_ID;
+	}
+}
+
 static void cache_of_set_props(struct cacheinfo *this_leaf,
 			       struct device_node *np)
 {
@@ -198,6 +247,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
 	cache_get_line_size(this_leaf, np);
 	cache_nr_sets(this_leaf, np);
 	cache_associativity(this_leaf);
+	cache_of_set_id(this_leaf, np);
 }
 
 static int cache_setup_of_node(unsigned int cpu)
@@ -254,11 +304,11 @@ static int of_count_cache_leaves(struct device_node *np)
 {
 	unsigned int leaves = 0;
 
-	if (of_property_read_bool(np, "cache-size"))
+	if (of_property_present(np, "cache-size"))
 		++leaves;
-	if (of_property_read_bool(np, "i-cache-size"))
+	if (of_property_present(np, "i-cache-size"))
 		++leaves;
-	if (of_property_read_bool(np, "d-cache-size"))
+	if (of_property_present(np, "d-cache-size"))
 		++leaves;
 
 	if (!leaves) {
@@ -367,9 +417,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
 		for_each_online_cpu(i) {
-			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
-
-			if (i == cpu || !sib_cpu_ci->info_list)
+			if (i == cpu || !per_cpu_cacheinfo(i))
 				continue;/* skip if itself or no cacheinfo */
 
 			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
 				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
@@ -409,10 +457,7 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 	for (index = 0; index < cache_leaves(cpu); index++) {
 		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
 		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
-			struct cpu_cacheinfo *sib_cpu_ci =
-						get_cpu_cacheinfo(sibling);
-
-			if (sibling == cpu || !sib_cpu_ci->info_list)
+			if (sibling == cpu || !per_cpu_cacheinfo(sibling))
 				continue;/* skip if itself or no cacheinfo */
 
 			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
@@ -463,11 +508,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
 	return -ENOENT;
 }
 
-static inline
-int allocate_cache_info(int cpu)
+static inline int allocate_cache_info(int cpu)
 {
-	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
-					 sizeof(struct cacheinfo), GFP_ATOMIC);
+	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
 	if (!per_cpu_cacheinfo(cpu)) {
 		cache_leaves(cpu) = 0;
 		return -ENOMEM;
@@ -539,7 +582,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
 	 */
 	ci_cacheinfo(cpu)->early_ci_levels = false;
 
-	if (cache_leaves(cpu) <= early_leaves)
+	/*
+	 * Some architectures (e.g., x86) do not use early initialization.
+	 * Allocate memory now in such case.
+	 */
+	if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
 		return 0;
 
 	kfree(per_cpu_cacheinfo(cpu));
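Note on the arch_compact_of_hwid() hook added above: cache_of_set_id() derives a cache's id as the smallest compacted hardware CPU ID among the CPUs whose next-level-cache chain reaches that cache node, and it gives up (leaving CACHE_ID unset) if any compacted ID has bits set above bit 31. The sketch below is a hypothetical illustration, not part of this patch, of how an architecture whose raw hardware IDs do not fit in 32 bits could override the default identity mapping in a header visible to drivers/base/cacheinfo.c; the field layout and the names EXAMPLE_UPPER_AFF, EXAMPLE_LOWER_AFF and example_compact_of_hwid are invented for illustration only.

/*
 * Hypothetical arch override (names and bit layout are assumptions,
 * not taken from this patch or any real architecture).
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_UPPER_AFF	GENMASK_ULL(39, 32)	/* assumed high affinity field */
#define EXAMPLE_LOWER_AFF	GENMASK_ULL(23, 0)	/* assumed low affinity fields */

static inline u64 example_compact_of_hwid(u64 hwid)
{
	/*
	 * Fold the high field into bits [31:24] so the result fits in
	 * 32 bits and cache_of_set_id() can derive a valid cache id
	 * instead of bailing out on the upper-bits check.
	 */
	return (FIELD_GET(EXAMPLE_UPPER_AFF, hwid) << 24) |
	       FIELD_GET(EXAMPLE_LOWER_AFF, hwid);
}
#define arch_compact_of_hwid	example_compact_of_hwid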
