Diffstat (limited to 'drivers/base/arch_topology.c')
-rw-r--r--	drivers/base/arch_topology.c	559
1 file changed, 420 insertions(+), 139 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 921312a8d957..84ec92bff642 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -7,24 +7,30 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/cacheinfo.h>
+#include <linux/cleanup.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
 #include <linux/device.h>
 #include <linux/of.h>
 #include <linux/slab.h>
-#include <linux/string.h>
 #include <linux/sched/topology.h>
 #include <linux/cpuset.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
-#include <linux/percpu.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
-#include <linux/smp.h>
+#include <linux/units.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/hw_pressure.h>
 
 static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
 static struct cpumask scale_freq_counters_mask;
 static bool scale_freq_invariant;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
+EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
 
 static bool supports_scale_freq_counters(const struct cpumask *cpus)
 {
@@ -148,57 +154,54 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 		per_cpu(arch_freq_scale, i) = scale;
 }
 
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+DEFINE_PER_CPU(unsigned long, hw_pressure);
 
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+/**
+ * topology_update_hw_pressure() - Update HW pressure for CPUs
+ * @cpus        : The related CPUs for which capacity has been reduced
+ * @capped_freq : The maximum allowed frequency that CPUs can run at
+ *
+ * Update the value of HW pressure for all @cpus in the mask. The
+ * cpumask should include all (online+offline) affected CPUs, to avoid
+ * operating on stale data when hot-plug is used for some CPUs. The
+ * @capped_freq reflects the currently allowed max CPUs frequency due to
+ * HW capping. It might be also a boost frequency value, which is bigger
+ * than the internal 'capacity_freq_ref' max frequency. In such case the
+ * pressure value should simply be removed, since this is an indication that
+ * there is no HW throttling. The @capped_freq must be provided in kHz.
+ */
+void topology_update_hw_pressure(const struct cpumask *cpus,
+				 unsigned long capped_freq)
 {
-	per_cpu(cpu_scale, cpu) = capacity;
-}
+	unsigned long max_capacity, capacity, pressure;
+	u32 max_freq;
+	int cpu;
 
-DEFINE_PER_CPU(unsigned long, thermal_pressure);
+	cpu = cpumask_first(cpus);
+	max_capacity = arch_scale_cpu_capacity(cpu);
+	max_freq = arch_scale_freq_ref(cpu);
 
-void topology_set_thermal_pressure(const struct cpumask *cpus,
-			       unsigned long th_pressure)
-{
-	int cpu;
+	/*
+	 * Handle properly the boost frequencies, which should simply clean
+	 * the HW pressure value.
+	 */
+	if (max_freq <= capped_freq)
+		capacity = max_capacity;
+	else
+		capacity = mult_frac(max_capacity, capped_freq, max_freq);
 
-	for_each_cpu(cpu, cpus)
-		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
-}
+	pressure = max_capacity - capacity;
 
-static ssize_t cpu_capacity_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	trace_hw_pressure_update(cpu, pressure);
 
-	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+	for_each_cpu(cpu, cpus)
+		WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
 }
+EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
 
 static void update_topology_flags_workfn(struct work_struct *work);
 static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int register_cpu_capacity_sysctl(void)
-{
-	int i;
-	struct device *cpu;
-
-	for_each_possible_cpu(i) {
-		cpu = get_cpu_device(i);
-		if (!cpu) {
-			pr_err("%s: too early to get CPU%d device!\n",
-			       __func__, i);
-			continue;
-		}
-		device_create_file(cpu, &dev_attr_cpu_capacity);
-	}
-
-	return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
 static int update_topology;
 
 int topology_update_cpu_topology(void)
@@ -218,7 +221,6 @@ static void update_topology_flags_workfn(struct work_struct *work)
 	update_topology = 0;
 }
 
-static DEFINE_PER_CPU(u32, freq_factor) = 1;
 static u32 *raw_capacity;
 
 static int free_raw_capacity(void)
@@ -240,13 +242,15 @@ void topology_normalize_cpu_scale(void)
 
 	capacity_scale = 1;
 	for_each_possible_cpu(cpu) {
-		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+		capacity = raw_capacity[cpu] *
+			   (per_cpu(capacity_freq_ref, cpu) ?: 1);
 		capacity_scale = max(capacity, capacity_scale);
 	}
 
 	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
 	for_each_possible_cpu(cpu) {
-		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+		capacity = raw_capacity[cpu] *
+			   (per_cpu(capacity_freq_ref, cpu) ?: 1);
 		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
 				     capacity_scale);
 		topology_set_cpu_scale(cpu, capacity);
@@ -282,15 +286,15 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 			cpu_node, raw_capacity[cpu]);
 
 		/*
-		 * Update freq_factor for calculating early boot cpu capacities.
+		 * Update capacity_freq_ref for calculating early boot CPU capacities.
 		 * For non-clk CPU DVFS mechanism, there's no way to get the
 		 * frequency value now, assuming they are running at the same
-		 * frequency (by keeping the initial freq_factor value).
+		 * frequency (by keeping the initial capacity_freq_ref value).
 		 */
 		cpu_clk = of_clk_get(cpu_node, 0);
-		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
-			per_cpu(freq_factor, cpu) =
-				clk_get_rate(cpu_clk) / 1000;
+		if (!IS_ERR_OR_NULL(cpu_clk)) {
+			per_cpu(capacity_freq_ref, cpu) =
+				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
 			clk_put(cpu_clk);
 		}
 	} else {
@@ -306,6 +310,70 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 	return !ret;
 }
 
+void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
+{
+}
+
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+
+static inline void topology_init_cpu_capacity_cppc(void)
+{
+	u64 capacity, capacity_scale = 0;
+	struct cppc_perf_caps perf_caps;
+	int cpu;
+
+	if (likely(!acpi_cpc_valid()))
+		return;
+
+	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
+			       GFP_KERNEL);
+	if (!raw_capacity)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
+		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
+		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
+			raw_capacity[cpu] = perf_caps.highest_perf;
+			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);
+
+			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
+
+			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
+				 cpu, raw_capacity[cpu]);
+			continue;
+		}
+
+		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
+		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+		goto exit;
+	}
+
+	for_each_possible_cpu(cpu) {
+		freq_inv_set_max_ratio(cpu,
+				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+
+		capacity = raw_capacity[cpu];
+		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+				     capacity_scale);
+		topology_set_cpu_scale(cpu, capacity);
+		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+			 cpu, topology_get_cpu_scale(cpu));
+	}
+
+	schedule_work(&update_topology_flags_work);
+	pr_debug("cpu_capacity: cpu_capacity initialization done\n");
+
+exit:
+	free_raw_capacity();
+}
+void acpi_processor_init_invariance_cppc(void)
+{
+	topology_init_cpu_capacity_cppc();
+}
+#endif
+
 #ifdef CONFIG_CPU_FREQ
 static cpumask_var_t cpus_to_visit;
 static void parsing_done_workfn(struct work_struct *work);
@@ -319,9 +387,6 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	struct cpufreq_policy *policy = data;
 	int cpu;
 
-	if (!raw_capacity)
-		return 0;
-
 	if (val != CPUFREQ_CREATE_POLICY)
 		return 0;
 
@@ -331,13 +396,18 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 
 	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
 
-	for_each_cpu(cpu, policy->related_cpus)
-		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
+	for_each_cpu(cpu, policy->related_cpus) {
+		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
+		freq_inv_set_max_ratio(cpu,
+				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+	}
 
 	if (cpumask_empty(cpus_to_visit)) {
-		topology_normalize_cpu_scale();
-		schedule_work(&update_topology_flags_work);
-		free_raw_capacity();
+		if (raw_capacity) {
+			topology_normalize_cpu_scale();
+			schedule_work(&update_topology_flags_work);
+			free_raw_capacity();
+		}
 		pr_debug("cpu_capacity: parsing done\n");
 		schedule_work(&parsing_done_work);
 	}
@@ -354,11 +424,10 @@ static int __init register_cpufreq_notifier(void)
 	int ret;
 
 	/*
-	 * on ACPI-based systems we need to use the default cpu capacity
-	 * until we have the necessary code to parse the cpu capacity, so
-	 * skip registering cpufreq notifier.
+	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
+	 * information is not needed for cpu capacity initialization.
 	 */
-	if (!acpi_disabled || !raw_capacity)
+	if (!acpi_disabled)
 		return -EINVAL;
 
 	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
@@ -388,6 +457,10 @@ core_initcall(free_raw_capacity);
 #endif
 
 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/* Used to enable the SMT control */
+static unsigned int max_smt_thread_num = 1;
+
 /*
  * This function returns the logic cpu number of the node.
  * There are basically three kinds of return values:
@@ -400,10 +473,10 @@ core_initcall(free_raw_capacity);
  */
 static int __init get_cpu_for_node(struct device_node *node)
 {
-	struct device_node *cpu_node;
 	int cpu;
+	struct device_node *cpu_node __free(device_node) =
+		of_parse_phandle(node, "cpu", 0);
 
-	cpu_node = of_parse_phandle(node, "cpu", 0);
 	if (!cpu_node)
 		return -1;
 
@@ -414,38 +487,40 @@ static int __init get_cpu_for_node(struct device_node *node)
 		pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
 			cpu_node, cpumask_pr_args(cpu_possible_mask));
 
-	of_node_put(cpu_node);
 	return cpu;
 }
 
 static int __init parse_core(struct device_node *core, int package_id,
-			     int core_id)
+			     int cluster_id, int core_id)
 {
 	char name[20];
 	bool leaf = true;
 	int i = 0;
 	int cpu;
-	struct device_node *t;
 
 	do {
 		snprintf(name, sizeof(name), "thread%d", i);
-		t = of_get_child_by_name(core, name);
-		if (t) {
-			leaf = false;
-			cpu = get_cpu_for_node(t);
-			if (cpu >= 0) {
-				cpu_topology[cpu].package_id = package_id;
-				cpu_topology[cpu].core_id = core_id;
-				cpu_topology[cpu].thread_id = i;
-			} else if (cpu != -ENODEV) {
-				pr_err("%pOF: Can't get CPU for thread\n", t);
-				of_node_put(t);
-				return -EINVAL;
-			}
-			of_node_put(t);
+		struct device_node *t __free(device_node) =
+			of_get_child_by_name(core, name);
+
+		if (!t)
+			break;
+
+		leaf = false;
+		cpu = get_cpu_for_node(t);
+		if (cpu >= 0) {
+			cpu_topology[cpu].package_id = package_id;
+			cpu_topology[cpu].cluster_id = cluster_id;
+			cpu_topology[cpu].core_id = core_id;
+			cpu_topology[cpu].thread_id = i;
+		} else if (cpu != -ENODEV) {
+			pr_err("%pOF: Can't get CPU for thread\n", t);
+			return -EINVAL;
 		}
 		i++;
-	} while (t);
+	} while (1);
+
+	max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
 
 	cpu = get_cpu_for_node(core);
 	if (cpu >= 0) {
@@ -456,6 +531,7 @@ static int __init parse_core(struct device_node *core, int package_id,
 		}
 
 		cpu_topology[cpu].package_id = package_id;
+		cpu_topology[cpu].cluster_id = cluster_id;
 		cpu_topology[cpu].core_id = core_id;
 	} else if (leaf && cpu != -ENODEV) {
 		pr_err("%pOF: Can't get CPU for leaf core\n", core);
@@ -465,13 +541,12 @@ static int __init parse_core(struct device_node *core, int package_id,
 	return 0;
 }
 
-static int __init parse_cluster(struct device_node *cluster, int depth)
+static int __init parse_cluster(struct device_node *cluster, int package_id,
+				int cluster_id, int depth)
 {
 	char name[20];
 	bool leaf = true;
 	bool has_cores = false;
-	struct device_node *c;
-	static int package_id __initdata;
 	int core_id = 0;
 	int i, ret;
 
@@ -483,63 +558,103 @@
 	i = 0;
 	do {
 		snprintf(name, sizeof(name), "cluster%d", i);
-		c = of_get_child_by_name(cluster, name);
-		if (c) {
-			leaf = false;
-			ret = parse_cluster(c, depth + 1);
-			of_node_put(c);
-			if (ret != 0)
-				return ret;
-		}
+		struct device_node *c __free(device_node) =
+			of_get_child_by_name(cluster, name);
+
+		if (!c)
+			break;
+
+		leaf = false;
+		ret = parse_cluster(c, package_id, i, depth + 1);
+		if (depth > 0)
+			pr_warn("Topology for clusters of clusters not yet supported\n");
+		if (ret != 0)
+			return ret;
 		i++;
-	} while (c);
+	} while (1);
 
 	/* Now check for cores */
 	i = 0;
 	do {
 		snprintf(name, sizeof(name), "core%d", i);
-		c = of_get_child_by_name(cluster, name);
-		if (c) {
-			has_cores = true;
-
-			if (depth == 0) {
-				pr_err("%pOF: cpu-map children should be clusters\n",
-				       c);
-				of_node_put(c);
-				return -EINVAL;
-			}
+		struct device_node *c __free(device_node) =
+			of_get_child_by_name(cluster, name);
 
-			if (leaf) {
-				ret = parse_core(c, package_id, core_id++);
-			} else {
-				pr_err("%pOF: Non-leaf cluster with core %s\n",
-				       cluster, name);
-				ret = -EINVAL;
-			}
+		if (!c)
+			break;
+
+		has_cores = true;
+
+		if (depth == 0) {
+			pr_err("%pOF: cpu-map children should be clusters\n", c);
+			return -EINVAL;
+		}
 
-			of_node_put(c);
+		if (leaf) {
+			ret = parse_core(c, package_id, cluster_id, core_id++);
 			if (ret != 0)
 				return ret;
+		} else {
+			pr_err("%pOF: Non-leaf cluster with core %s\n",
+			       cluster, name);
+			return -EINVAL;
 		}
+
 		i++;
-	} while (c);
+	} while (1);
 
 	if (leaf && !has_cores)
 		pr_warn("%pOF: empty cluster\n", cluster);
 
-	if (leaf)
+	return 0;
+}
+
+static int __init parse_socket(struct device_node *socket)
+{
+	char name[20];
+	bool has_socket = false;
+	int package_id = 0, ret;
+
+	do {
+		snprintf(name, sizeof(name), "socket%d", package_id);
+		struct device_node *c __free(device_node) =
+			of_get_child_by_name(socket, name);
+
+		if (!c)
+			break;
+
+		has_socket = true;
+		ret = parse_cluster(c, package_id, -1, 0);
+		if (ret != 0)
+			return ret;
 		package_id++;
+	} while (1);
 
-	return 0;
+	if (!has_socket)
+		ret = parse_cluster(socket, 0, -1, 0);
+
+	/*
+	 * Reset the max_smt_thread_num to 1 on failure. Since on failure
+	 * we need to notify the framework the SMT is not supported, but
+	 * max_smt_thread_num can be initialized to the SMT thread number
+	 * of the cores which are successfully parsed.
+	 */
+	if (ret)
+		max_smt_thread_num = 1;
+
+	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
+	return ret;
 }
 
 static int __init parse_dt_topology(void)
 {
-	struct device_node *cn, *map;
 	int ret = 0;
 	int cpu;
+	struct device_node *cn __free(device_node) =
+		of_find_node_by_path("/cpus");
 
-	cn = of_find_node_by_path("/cpus");
 	if (!cn) {
 		pr_err("No CPU information found in DT\n");
 		return 0;
@@ -549,13 +664,15 @@ static int __init parse_dt_topology(void)
 	 * When topology is provided cpu-map is essentially a root
 	 * cluster with restricted subnodes.
 	 */
-	map = of_get_child_by_name(cn, "cpu-map");
+	struct device_node *map __free(device_node) =
+		of_get_child_by_name(cn, "cpu-map");
+
 	if (!map)
-		goto out;
+		return ret;
 
-	ret = parse_cluster(map, 0);
+	ret = parse_socket(map);
 	if (ret != 0)
-		goto out_map;
+		return ret;
 
 	topology_normalize_cpu_scale();
 
@@ -564,13 +681,10 @@ static int __init parse_dt_topology(void)
 	 * only mark cores described in the DT as possible.
 	 */
 	for_each_possible_cpu(cpu)
-		if (cpu_topology[cpu].package_id == -1)
-			ret = -EINVAL;
+		if (cpu_topology[cpu].package_id < 0) {
+			return -EINVAL;
+		}
 
-out_map:
-	of_node_put(map);
-out:
-	of_node_put(cn);
 	return ret;
 }
 #endif
@@ -590,24 +704,51 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 		/* not numa in package, lets use the package siblings */
 		core_mask = &cpu_topology[cpu].core_sibling;
 	}
-	if (cpu_topology[cpu].llc_id != -1) {
+
+	if (last_level_cache_is_valid(cpu)) {
 		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
 			core_mask = &cpu_topology[cpu].llc_sibling;
 	}
 
+	/*
+	 * For systems with no shared cpu-side LLC but with clusters defined,
+	 * extend core_mask to cluster_siblings. The sched domain builder will
+	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
+	 */
+	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
+		core_mask = &cpu_topology[cpu].cluster_sibling;
+
 	return core_mask;
 }
 
+const struct cpumask *cpu_clustergroup_mask(int cpu)
+{
+	/*
+	 * Forbid cpu_clustergroup_mask() to span more or the same CPUs as
+	 * cpu_coregroup_mask().
+	 */
+	if (cpumask_subset(cpu_coregroup_mask(cpu),
+			   &cpu_topology[cpu].cluster_sibling))
+		return topology_sibling_cpumask(cpu);
+
+	return &cpu_topology[cpu].cluster_sibling;
+}
+
 void update_siblings_masks(unsigned int cpuid)
 {
 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
-	int cpu;
+	int cpu, ret;
+
+	ret = detect_cache_attributes(cpuid);
+	if (ret && ret != -ENOENT)
+		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
 
 	/* update core and thread sibling masks */
 	for_each_online_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
 
-		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+		if (last_level_cache_is_shared(cpu, cpuid)) {
 			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
 			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
 		}
@@ -618,6 +759,14 @@ void update_siblings_masks(unsigned int cpuid)
 		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
 		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
 
+		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+			continue;
+
+		if (cpuid_topo->cluster_id >= 0) {
+			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
+			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
+		}
+
 		if (cpuid_topo->core_id != cpu_topo->core_id)
 			continue;
 
@@ -633,6 +782,9 @@ static void clear_cpu_topology(int cpu)
 	cpumask_clear(&cpu_topo->llc_sibling);
 	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
 
+	cpumask_clear(&cpu_topo->cluster_sibling);
+	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
+
 	cpumask_clear(&cpu_topo->core_sibling);
 	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
 	cpumask_clear(&cpu_topo->thread_sibling);
@@ -648,8 +800,8 @@ void __init reset_cpu_topology(void)
 
 		cpu_topo->thread_id = -1;
 		cpu_topo->core_id = -1;
+		cpu_topo->cluster_id = -1;
 		cpu_topo->package_id = -1;
-		cpu_topo->llc_id = -1;
 
 		clear_cpu_topology(cpu);
 	}
@@ -663,29 +815,158 @@ void remove_cpu_topology(unsigned int cpu)
 		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
 	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
 		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
+		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
 	for_each_cpu(sibling, topology_llc_cpumask(cpu))
 		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
 
 	clear_cpu_topology(cpu);
 }
 
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+	unsigned int thread_num;
+	int core_id;
+};
+
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+	/*
+	 * if the PPTT doesn't have thread information, check for architecture
+	 * specific fallback if available
+	 */
+	if (is_threaded < 0)
+		is_threaded = arch_cpu_is_threaded();
+
+	return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
 __weak int __init parse_acpi_topology(void)
 {
+	unsigned int max_smt_thread_num = 1;
+	struct cpu_smt_info *entry;
+	struct xarray hetero_cpu;
+	unsigned long hetero_id;
+	int cpu, topology_id;
+
+	if (acpi_disabled)
+		return 0;
+
+	xa_init(&hetero_cpu);
+
+	for_each_possible_cpu(cpu) {
+		topology_id = find_acpi_cpu_topology(cpu, 0);
+		if (topology_id < 0)
+			return topology_id;
+
+		if (acpi_cpu_is_threaded(cpu)) {
+			cpu_topology[cpu].thread_id = topology_id;
+			topology_id = find_acpi_cpu_topology(cpu, 1);
+			cpu_topology[cpu].core_id = topology_id;
+
+			/*
+			 * In the PPTT, CPUs below a node with the 'identical
+			 * implementation' flag have the same number of threads.
+			 * Count the number of threads for only one CPU (i.e.
+			 * one core_id) among those with the same hetero_id.
+			 * See the comment of find_acpi_cpu_topology_hetero_id()
+			 * for more details.
+			 *
+			 * One entry is created for each node having:
+			 * - the 'identical implementation' flag
+			 * - its parent not having the flag
+			 */
+			hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+			entry = xa_load(&hetero_cpu, hetero_id);
+			if (!entry) {
+				entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+				WARN_ON_ONCE(!entry);
+
+				if (entry) {
+					entry->core_id = topology_id;
+					entry->thread_num = 1;
+					xa_store(&hetero_cpu, hetero_id,
+						 entry, GFP_KERNEL);
+				}
+			} else if (entry->core_id == topology_id) {
+				entry->thread_num++;
+			}
+		} else {
+			cpu_topology[cpu].thread_id = -1;
+			cpu_topology[cpu].core_id = topology_id;
+		}
+		topology_id = find_acpi_cpu_topology_cluster(cpu);
+		cpu_topology[cpu].cluster_id = topology_id;
+		topology_id = find_acpi_cpu_topology_package(cpu);
+		cpu_topology[cpu].package_id = topology_id;
+	}
+
+	/*
+	 * This is a short loop since the number of XArray elements is the
+	 * number of heterogeneous CPU clusters. On a homogeneous system
+	 * there's only one entry in the XArray.
+	 */
+	xa_for_each(&hetero_cpu, hetero_id, entry) {
+		max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+		xa_erase(&hetero_cpu, hetero_id);
+		kfree(entry);
+	}
+
+	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+	xa_destroy(&hetero_cpu);
 	return 0;
 }
 
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
 void __init init_cpu_topology(void)
 {
+	int cpu, ret;
+
 	reset_cpu_topology();
+	ret = parse_acpi_topology();
+	if (!ret)
+		ret = of_have_populated_dt() && parse_dt_topology();
 
-	/*
-	 * Discard anything that was parsed if we hit an error so we
-	 * don't use partial information.
-	 */
-	if (parse_acpi_topology())
-		reset_cpu_topology();
-	else if (of_have_populated_dt() && parse_dt_topology())
+	if (ret) {
+		/*
+		 * Discard anything that was parsed if we hit an error so we
+		 * don't use partial information. But do not return yet to give
+		 * arch-specific early cache level detection a chance to run.
+		 */
 		reset_cpu_topology();
+	}
+
+	for_each_possible_cpu(cpu) {
+		ret = fetch_cache_info(cpu);
+		if (!ret)
+			continue;
+		else if (ret != -ENOENT)
+			pr_err("Early cacheinfo failed, ret = %d\n", ret);
+		return;
+	}
+}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+	if (cpuid_topo->package_id != -1)
+		goto topology_populated;
+
+	cpuid_topo->thread_id = -1;
+	cpuid_topo->core_id = cpuid;
+	cpuid_topo->package_id = cpu_to_node(cpuid);
+
+	pr_debug("CPU%u: package %d core %d thread %d\n",
+		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+		 cpuid_topo->thread_id);
+
+topology_populated:
+	update_siblings_masks(cpuid);
 }
 #endif
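As a reading aid for the topology_update_hw_pressure() hunk above: the pressure term is just the capacity lost to the frequency cap, computed with the kernel's mult_frac() so that max_capacity * capped_freq cannot overflow. Below is a minimal userspace sketch of the same arithmetic. SCHED_CAPACITY_SCALE and mult_frac() are redefined locally to mirror the kernel headers, and the frequencies are made-up sample values, not taken from any real platform; build with gcc, since the macro uses GNU statement expressions like the kernel's does.

	#include <stdio.h>

	/* Mirrors include/linux/sched.h: full capacity is 1 << 10 == 1024. */
	#define SCHED_CAPACITY_SCALE	(1UL << 10)

	/* Mirrors the kernel's mult_frac(): x * n / d without overflowing x * n. */
	#define mult_frac(x, n, d)			\
	({						\
		typeof(x) q = (x) / (d);		\
		typeof(x) r = (x) % (d);		\
		q * (n) + r * (n) / (d);		\
	})

	int main(void)
	{
		unsigned long max_capacity = SCHED_CAPACITY_SCALE;	/* arch_scale_cpu_capacity() */
		unsigned long max_freq     = 2800000;			/* arch_scale_freq_ref(), kHz */
		unsigned long capped_freq  = 2100000;			/* HW-capped max frequency, kHz */
		unsigned long capacity, pressure;

		/* A cap at or above the reference frequency means no throttling. */
		if (max_freq <= capped_freq)
			capacity = max_capacity;
		else
			capacity = mult_frac(max_capacity, capped_freq, max_freq);

		pressure = max_capacity - capacity;
		printf("capacity=%lu pressure=%lu\n", capacity, pressure);	/* 768, 256 */
		return 0;
	}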

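The topology_normalize_cpu_scale() hunks fold the per-CPU capacity_freq_ref (kHz) into the raw capacity-dmips-mhz values before normalizing, so the largest product lands exactly on SCHED_CAPACITY_SCALE. A self-contained sketch of that two-pass normalization, with a hypothetical big/little pair (the values are illustrative, not from any real DT; the ?: 1 fallback matches the diff's handling of CPUs whose reference frequency is still unknown and, like the kernel code, relies on the GNU ?: extension):

	#include <stdio.h>
	#include <stdint.h>

	#define SCHED_CAPACITY_SHIFT	10	/* mirrors include/linux/sched.h */
	#define NR_CPUS			2

	int main(void)
	{
		/* Hypothetical big.LITTLE pair: DT capacity-dmips-mhz and max frequency in kHz. */
		uint64_t raw_capacity[NR_CPUS]      = { 1024, 436 };
		uint64_t capacity_freq_ref[NR_CPUS] = { 2800000, 1800000 };
		uint64_t capacity_scale = 1, capacity;
		int cpu;

		/* First pass: find the largest dmips * frequency product. */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			capacity = raw_capacity[cpu] * (capacity_freq_ref[cpu] ?: 1);
			if (capacity > capacity_scale)
				capacity_scale = capacity;
		}

		/* Second pass: scale so the fastest CPU reads exactly 1024. */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			capacity = raw_capacity[cpu] * (capacity_freq_ref[cpu] ?: 1);
			capacity = (capacity << SCHED_CAPACITY_SHIFT) / capacity_scale;
			printf("cpu%d cpu_capacity=%llu\n", cpu,
			       (unsigned long long)capacity);	/* 1024 and 280 */
		}
		return 0;
	}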