Diffstat (limited to 'drivers/base/cacheinfo.c')
-rw-r--r--    drivers/base/cacheinfo.c    436
1 file changed, 382 insertions, 54 deletions
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 950b22cdb5f7..613410705a47 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,13 +8,14 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/acpi.h>
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/cacheinfo.h>
 #include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
@@ -28,6 +29,9 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
 #define per_cpu_cacheinfo_idx(cpu, idx) \
                                 (per_cpu_cacheinfo(cpu) + (idx))
 
+/* Set if no cache information is found in DT/ACPI. */
+static bool use_arch_info;
+
 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
 {
         return ci_cacheinfo(cpu);
@@ -38,11 +42,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 {
         /*
          * For non DT/ACPI systems, assume unique level 1 caches,
-         * system-wide shared caches for all other levels. This will be used
-         * only if arch specific code has not populated shared_cpu_map
+         * system-wide shared caches for all other levels.
          */
-        if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
-                return !(this_leaf->level == 1);
+        if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
+            use_arch_info)
+                return (this_leaf->level != 1) && (sib_leaf->level != 1);
 
         if ((sib_leaf->attributes & CACHE_ID) &&
             (this_leaf->attributes & CACHE_ID))
@@ -55,7 +59,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
 {
         struct cacheinfo *llc;
 
-        if (!cache_leaves(cpu))
+        if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
                 return false;
 
         llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -79,6 +83,9 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
 }
 
 #ifdef CONFIG_OF
+
+static bool of_check_cache_nodes(struct device_node *np);
+
 /* OF properties to query for a given cache type */
 struct cache_type_info {
         const char *size_prop;
@@ -177,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
         return of_property_read_bool(np, "cache-unified");
 }
 
+static bool match_cache_node(struct device_node *cpu,
+                             const struct device_node *cache_node)
+{
+        struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+        while (cache) {
+                if (cache == cache_node) {
+                        of_node_put(cache);
+                        return true;
+                }
+
+                prev = cache;
+                cache = of_find_next_cache_node(cache);
+                of_node_put(prev);
+        }
+
+        return false;
+}
+
+#ifndef arch_compact_of_hwid
+#define arch_compact_of_hwid(_x)        (_x)
+#endif
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+                            struct device_node *cache_node)
+{
+        struct device_node *cpu;
+        u32 min_id = ~0;
+
+        for_each_of_cpu_node(cpu) {
+                u64 id = of_get_cpu_hwid(cpu, 0);
+
+                id = arch_compact_of_hwid(id);
+                if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+                        of_node_put(cpu);
+                        return;
+                }
+
+                if (match_cache_node(cpu, cache_node))
+                        min_id = min(min_id, id);
+        }
+
+        if (min_id != ~0) {
+                this_leaf->id = min_id;
+                this_leaf->attributes |= CACHE_ID;
+        }
+}
+
 static void cache_of_set_props(struct cacheinfo *this_leaf,
                                struct device_node *np)
 {
@@ -192,28 +247,29 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
         cache_get_line_size(this_leaf, np);
         cache_nr_sets(this_leaf, np);
         cache_associativity(this_leaf);
+        cache_of_set_id(this_leaf, np);
 }
 
 static int cache_setup_of_node(unsigned int cpu)
 {
-        struct device_node *np, *prev;
         struct cacheinfo *this_leaf;
         unsigned int index = 0;
 
-        np = of_cpu_device_node_get(cpu);
+        struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
         if (!np) {
                 pr_err("Failed to find cpu%d device node\n", cpu);
                 return -ENOENT;
         }
 
-        prev = np;
+        if (!of_check_cache_nodes(np))
+                return -ENOENT;
 
         while (index < cache_leaves(cpu)) {
                 this_leaf = per_cpu_cacheinfo_idx(cpu, index);
                 if (this_leaf->level != 1) {
+                        struct device_node *prev __free(device_node) = np;
+
                         np = of_find_next_cache_node(np);
-                        of_node_put(prev);
-                        prev = np;
                         if (!np)
                                 break;
                 }
@@ -222,15 +278,92 @@ static int cache_setup_of_node(unsigned int cpu)
                 index++;
         }
 
-        of_node_put(np);
-
         if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                 return -ENOENT;
 
         return 0;
 }
+
+static bool of_check_cache_nodes(struct device_node *np)
+{
+        if (of_property_present(np, "cache-size") ||
+            of_property_present(np, "i-cache-size") ||
+            of_property_present(np, "d-cache-size") ||
+            of_property_present(np, "cache-unified"))
+                return true;
+
+        struct device_node *next __free(device_node) = of_find_next_cache_node(np);
+        if (next)
+                return true;
+
+        return false;
+}
+
+static int of_count_cache_leaves(struct device_node *np)
+{
+        unsigned int leaves = 0;
+
+        if (of_property_present(np, "cache-size"))
+                ++leaves;
+        if (of_property_present(np, "i-cache-size"))
+                ++leaves;
+        if (of_property_present(np, "d-cache-size"))
+                ++leaves;
+
+        if (!leaves) {
+                /* The '[i-|d-|]cache-size' property is required, but
+                 * if absent, fallback on the 'cache-unified' property.
+                 */
+                if (of_property_read_bool(np, "cache-unified"))
+                        return 1;
+                else
+                        return 2;
+        }
+
+        return leaves;
+}
+
+int init_of_cache_level(unsigned int cpu)
+{
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
+        unsigned int levels = 0, leaves, level;
+
+        if (!of_check_cache_nodes(np))
+                return -ENOENT;
+
+        leaves = of_count_cache_leaves(np);
+        if (leaves > 0)
+                levels = 1;
+
+        while (1) {
+                struct device_node *prev __free(device_node) = np;
+
+                np = of_find_next_cache_node(np);
+                if (!np)
+                        break;
+
+                if (!of_device_is_compatible(np, "cache"))
+                        return -EINVAL;
+                if (of_property_read_u32(np, "cache-level", &level))
+                        return -EINVAL;
+                if (level <= levels)
+                        return -EINVAL;
+
+                leaves += of_count_cache_leaves(np);
+                levels = level;
+        }
+
+        this_cpu_ci->num_levels = levels;
+        this_cpu_ci->num_leaves = leaves;
+
+        return 0;
+}
+
 #else
 static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+int init_of_cache_level(unsigned int cpu) { return 0; }
 #endif
 
 int __weak cache_setup_acpi(unsigned int cpu)
@@ -249,6 +382,10 @@ static int cache_setup_properties(unsigned int cpu)
         else if (!acpi_disabled)
                 ret = cache_setup_acpi(cpu);
 
+        // Assume there is no cache information available in DT/ACPI from now.
+        if (ret && use_arch_cache_info())
+                use_arch_info = true;
+
         return ret;
 }
@@ -256,7 +393,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 {
         struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
         struct cacheinfo *this_leaf, *sib_leaf;
-        unsigned int index;
+        unsigned int index, sib_index;
         int ret = 0;
 
         if (this_cpu_ci->cpu_map_populated)
@@ -267,7 +404,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
          * to update the shared cpu_map if the cache attributes were
          * populated early before all the cpus are brought online
          */
-        if (!last_level_cache_is_valid(cpu)) {
+        if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
                 ret = cache_setup_properties(cpu);
                 if (ret)
                         return ret;
@@ -280,15 +417,25 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
                 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                 for_each_online_cpu(i) {
-                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
-
-                        if (i == cpu || !sib_cpu_ci->info_list)
+                        if (i == cpu || !per_cpu_cacheinfo(i))
                                 continue;/* skip if itself or no cacheinfo */
-
-                        sib_leaf = per_cpu_cacheinfo_idx(i, index);
-                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
-                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
-                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+
+                        for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
+                                sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+
+                                /*
+                                 * Comparing cache IDs only makes sense if the leaves
+                                 * belong to the same cache level of same type. Skip
+                                 * the check if level and type do not match.
+                                 */
+                                if (sib_leaf->level != this_leaf->level ||
+                                    sib_leaf->type != this_leaf->type)
+                                        continue;
+
+                                if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+                                        cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+                                        cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+                                        break;
+                                }
                         }
                 }
                 /* record the maximum cache line size */
@@ -296,28 +443,46 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
                         coherency_max_size = this_leaf->coherency_line_size;
         }
 
+        /* shared_cpu_map is now populated for the cpu */
+        this_cpu_ci->cpu_map_populated = true;
         return 0;
 }
 
 static void cache_shared_cpu_map_remove(unsigned int cpu)
 {
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
         struct cacheinfo *this_leaf, *sib_leaf;
-        unsigned int sibling, index;
+        unsigned int sibling, index, sib_index;
 
         for (index = 0; index < cache_leaves(cpu); index++) {
                 this_leaf = per_cpu_cacheinfo_idx(cpu, index);
                 for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
-                        struct cpu_cacheinfo *sib_cpu_ci =
-                                get_cpu_cacheinfo(sibling);
-
-                        if (sibling == cpu || !sib_cpu_ci->info_list)
+                        if (sibling == cpu || !per_cpu_cacheinfo(sibling))
                                 continue;/* skip if itself or no cacheinfo */
 
-                        sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
-                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
-                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+                        for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
+                                sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+
+                                /*
+                                 * Comparing cache IDs only makes sense if the leaves
+                                 * belong to the same cache level of same type. Skip
+                                 * the check if level and type do not match.
+                                 */
+                                if (sib_leaf->level != this_leaf->level ||
+                                    sib_leaf->type != this_leaf->type)
+                                        continue;
+
+                                if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+                                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+                                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+                                        break;
+                                }
+                        }
                 }
         }
+
+        /* cpu is no longer populated in the shared map */
+        this_cpu_ci->cpu_map_populated = false;
 }
 
 static void free_cache_attributes(unsigned int cpu)
@@ -326,10 +491,11 @@ static void free_cache_attributes(unsigned int cpu)
                 return;
 
         cache_shared_cpu_map_remove(cpu);
+}
 
-        kfree(per_cpu_cacheinfo(cpu));
-        per_cpu_cacheinfo(cpu) = NULL;
-        cache_leaves(cpu) = 0;
+int __weak early_cache_level(unsigned int cpu)
+{
+        return -ENOENT;
 }
 
 int __weak init_cache_level(unsigned int cpu)
@@ -342,38 +508,113 @@ int __weak populate_cache_leaves(unsigned int cpu)
 {
         return -ENOENT;
 }
 
-int detect_cache_attributes(unsigned int cpu)
+static inline int allocate_cache_info(int cpu)
 {
+        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
+        if (!per_cpu_cacheinfo(cpu)) {
+                cache_leaves(cpu) = 0;
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+int fetch_cache_info(unsigned int cpu)
+{
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        unsigned int levels = 0, split_levels = 0;
         int ret;
 
-        /* Since early detection of the cacheinfo is allowed via this
-         * function and this also gets called as CPU hotplug callbacks via
-         * cacheinfo_cpu_online, the initialisation can be skipped and only
-         * CPU maps can be updated as the CPU online status would be update
-         * if called via cacheinfo_cpu_online path.
+        if (acpi_disabled) {
+                ret = init_of_cache_level(cpu);
+        } else {
+                ret = acpi_get_cache_info(cpu, &levels, &split_levels);
+                if (!ret) {
+                        this_cpu_ci->num_levels = levels;
+                        /*
+                         * This assumes that:
+                         * - there cannot be any split caches (data/instruction)
+                         *   above a unified cache
+                         * - data/instruction caches come by pair
+                         */
+                        this_cpu_ci->num_leaves = levels + split_levels;
+                }
+        }
+
+        if (ret || !cache_leaves(cpu)) {
+                ret = early_cache_level(cpu);
+                if (ret)
+                        return ret;
+
+                if (!cache_leaves(cpu))
+                        return -ENOENT;
+
+                this_cpu_ci->early_ci_levels = true;
+        }
+
+        return allocate_cache_info(cpu);
+}
+
+static inline int init_level_allocate_ci(unsigned int cpu)
+{
+        unsigned int early_leaves = cache_leaves(cpu);
+
+        /* Since early initialization/allocation of the cacheinfo is allowed
+         * via fetch_cache_info() and this also gets called as CPU hotplug
+         * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
+         * as it will happen only once (the cacheinfo memory is never freed).
+         * Just populate the cacheinfo. However, if the cacheinfo has been
+         * allocated early through the arch-specific early_cache_level() call,
+         * there is a chance the info is wrong (this can happen on arm64). In
+         * that case, call init_cache_level() anyway to give the arch-specific
+         * code a chance to make things right.
          */
-        if (per_cpu_cacheinfo(cpu))
-                goto update_cpu_map;
+        if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
+                return 0;
 
         if (init_cache_level(cpu) || !cache_leaves(cpu))
                 return -ENOENT;
 
-        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
-                                         sizeof(struct cacheinfo), GFP_ATOMIC);
-        if (per_cpu_cacheinfo(cpu) == NULL) {
-                cache_leaves(cpu) = 0;
-                return -ENOMEM;
-        }
+        /*
+         * Now that we have properly initialized the cache level info, make
+         * sure we don't try to do that again the next time we are called
+         * (e.g. as CPU hotplug callbacks).
+         */
+        ci_cacheinfo(cpu)->early_ci_levels = false;
 
         /*
-         * populate_cache_leaves() may completely setup the cache leaves and
-         * shared_cpu_map or it may leave it partially setup.
+         * Some architectures (e.g., x86) do not use early initialization.
+         * Allocate memory now in such case.
          */
-        ret = populate_cache_leaves(cpu);
+        if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
+                return 0;
+
+        kfree(per_cpu_cacheinfo(cpu));
+        return allocate_cache_info(cpu);
+}
+
+int detect_cache_attributes(unsigned int cpu)
+{
+        int ret;
+
+        ret = init_level_allocate_ci(cpu);
         if (ret)
-                goto free_ci;
+                return ret;
+
+        /*
+         * If LLC is valid the cache leaves were already populated so just go to
+         * update the cpu map.
+         */
+        if (!last_level_cache_is_valid(cpu)) {
+                /*
+                 * populate_cache_leaves() may completely setup the cache leaves and
+                 * shared_cpu_map or it may leave it partially setup.
                 */
+                ret = populate_cache_leaves(cpu);
+                if (ret)
+                        goto free_ci;
+        }
 
-update_cpu_map:
         /*
          * For systems using DT for cache hierarchy, fw_token
          * and shared_cpu_map will be set up here only if they are
@@ -689,24 +930,111 @@ err:
         return rc;
 }
 
+static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
+                                         cpumask_t **map)
+{
+        struct cacheinfo *llc, *sib_llc;
+        unsigned int sibling;
+
+        if (!last_level_cache_is_valid(cpu))
+                return 0;
+
+        llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+        if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
+                return 0;
+
+        if (online) {
+                *map = &llc->shared_cpu_map;
+                return cpumask_weight(*map);
+        }
+
+        /* shared_cpu_map of offlined CPU will be cleared, so use sibling map */
+        for_each_cpu(sibling, &llc->shared_cpu_map) {
+                if (sibling == cpu || !last_level_cache_is_valid(sibling))
+                        continue;
+                sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1);
+                *map = &sib_llc->shared_cpu_map;
+                return cpumask_weight(*map);
+        }
+
+        return 0;
+}
+
+/*
+ * Calculate the size of the per-CPU data cache slice. This can be
+ * used to estimate the size of the data cache slice that can be used
+ * by one CPU under ideal circumstances. UNIFIED caches are counted
+ * in addition to DATA caches. So, please consider code cache usage
+ * when use the result.
+ *
+ * Because the cache inclusive/non-inclusive information isn't
+ * available, we just use the size of the per-CPU slice of LLC to make
+ * the result more predictable across architectures.
+ */
+static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
+{
+        struct cpu_cacheinfo *ci;
+        struct cacheinfo *llc;
+        unsigned int nr_shared;
+
+        if (!last_level_cache_is_valid(cpu))
+                return;
+
+        ci = ci_cacheinfo(cpu);
+        llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+        if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
+                return;
+
+        nr_shared = cpumask_weight(&llc->shared_cpu_map);
+        if (nr_shared)
+                ci->per_cpu_data_slice_size = llc->size / nr_shared;
+}
+
+static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
+                                           cpumask_t *cpu_map)
+{
+        unsigned int icpu;
+
+        for_each_cpu(icpu, cpu_map) {
+                if (!cpu_online && icpu == cpu)
+                        continue;
+                update_per_cpu_data_slice_size_cpu(icpu);
+                setup_pcp_cacheinfo(icpu);
+        }
+}
+
 static int cacheinfo_cpu_online(unsigned int cpu)
 {
         int rc = detect_cache_attributes(cpu);
+        cpumask_t *cpu_map;
 
         if (rc)
                 return rc;
         rc = cache_add_dev(cpu);
         if (rc)
-                free_cache_attributes(cpu);
+                goto err;
+
+        if (cpu_map_shared_cache(true, cpu, &cpu_map))
+                update_per_cpu_data_slice_size(true, cpu, cpu_map);
+        return 0;
+err:
+        free_cache_attributes(cpu);
         return rc;
 }
 
 static int cacheinfo_cpu_pre_down(unsigned int cpu)
 {
+        cpumask_t *cpu_map;
+        unsigned int nr_shared;
+
+        nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
         if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
                 cpu_cache_sysfs_exit(cpu);
 
         free_cache_attributes(cpu);
+        if (nr_shared > 1)
+                update_per_cpu_data_slice_size(false, cpu, cpu_map);
         return 0;
 }
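The per-CPU data cache slice bookkeeping added at the end of this diff reduces to dividing the LLC size by the number of CPUs in its shared_cpu_map. A minimal userspace sketch of that arithmetic, for illustration only (the struct and the example numbers below are invented, not kernel code):

    #include <stdio.h>

    /* Hypothetical stand-ins for the cacheinfo fields used above. */
    struct llc_example {
            unsigned int size;       /* LLC size in bytes */
            unsigned int nr_shared;  /* CPUs in the LLC's shared_cpu_map */
    };

    int main(void)
    {
            /* Example: a 32 MiB last-level cache shared by 16 CPUs. */
            struct llc_example llc = { .size = 32u << 20, .nr_shared = 16 };

            /* Mirrors update_per_cpu_data_slice_size_cpu(): slice = size / nr_shared. */
            if (llc.nr_shared)
                    printf("per-CPU data slice: %u bytes\n", llc.size / llc.nr_shared);

            return 0;
    }

With these numbers the slice works out to 2 MiB per CPU.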
