Diffstat (limited to 'arch/arm/kernel/topology.c')
-rw-r--r--	arch/arm/kernel/topology.c | 210
1 file changed, 70 insertions(+), 140 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index c5a59546a256..2336ee2aa44a 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -11,7 +11,9 @@
  * for more details.
  */

+#include <linux/arch_topology.h>
 #include <linux/cpu.h>
+#include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/init.h>
@@ -20,37 +22,29 @@
 #include <linux/nodemask.h>
 #include <linux/of.h>
 #include <linux/sched.h>
+#include <linux/sched/topology.h>
 #include <linux/slab.h>
+#include <linux/string.h>

+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/topology.h>

 /*
- * cpu power scale management
+ * cpu capacity scale management
  */

 /*
- * cpu power table
+ * cpu capacity table
  * This per cpu data structure describes the relative capacity of each core.
  * On a heteregenous system, cores don't have the same computation capacity
- * and we reflect that difference in the cpu_power field so the scheduler can
- * take this difference into account during load balance. A per cpu structure
- * is preferred because each CPU updates its own cpu_power field during the
- * load balance except for idle cores. One idle core is selected to run the
- * rebalance_domains for all idle cores and the cpu_power can be updated
- * during this sequence.
+ * and we reflect that difference in the cpu_capacity field so the scheduler
+ * can take this difference into account during load balance. A per cpu
+ * structure is preferred because each CPU updates its own cpu_capacity field
+ * during the load balance except for idle cores. One idle core is selected
+ * to run the rebalance_domains for all idle cores and the cpu_capacity can be
+ * updated during this sequence.
  */
-static DEFINE_PER_CPU(unsigned long, cpu_scale);
-
-unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
-{
-	return per_cpu(cpu_scale, cpu);
-}
-
-static void set_power_scale(unsigned int cpu, unsigned long power)
-{
-	per_cpu(cpu_scale, cpu) = power;
-}

 #ifdef CONFIG_OF
 struct cpu_efficiency {
@@ -62,53 +56,61 @@ struct cpu_efficiency {
  * Table of relative efficiency of each processors
  * The efficiency value must fit in 20bit and the final
  * cpu_scale value must be in the range
- *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
  * in order to return at most 1 when DIV_ROUND_CLOSEST
  * is used to compute the capacity of a CPU.
  * Processors that are not defined in the table,
- * use the default SCHED_POWER_SCALE value for cpu_scale.
+ * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
  */
-struct cpu_efficiency table_efficiency[] = {
+static const struct cpu_efficiency table_efficiency[] = {
 	{"arm,cortex-a15", 3891},
 	{"arm,cortex-a7",  2048},
 	{NULL, },
 };

-struct cpu_capacity {
-	unsigned long hwid;
-	unsigned long capacity;
-};
+static unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu)	__cpu_capacity[cpu]

-struct cpu_capacity *cpu_capacity;
-
-unsigned long middle_capacity = 1;
+static unsigned long middle_capacity = 1;
+static bool cap_from_dt = true;

 /*
  * Iterate all CPUs' descriptor in DT and compute the efficiency
  * (as per table_efficiency). Also calculate a middle efficiency
  * as close as possible to  (max{eff_i} - min{eff_i}) / 2
- * This is later used to scale the cpu_power field such that an
- * 'average' CPU is of middle power. Also see the comments near
- * table_efficiency[] and update_cpu_power().
+ * This is later used to scale the cpu_capacity field such that an
+ * 'average' CPU is of middle capacity. Also see the comments near
+ * table_efficiency[] and update_cpu_capacity().
  */
 static void __init parse_dt_topology(void)
 {
-	struct cpu_efficiency *cpu_eff;
+	const struct cpu_efficiency *cpu_eff;
 	struct device_node *cn = NULL;
-	unsigned long min_capacity = (unsigned long)(-1);
+	unsigned long min_capacity = ULONG_MAX;
 	unsigned long max_capacity = 0;
 	unsigned long capacity = 0;
-	int alloc_size, cpu = 0;
+	int cpu = 0;

-	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
-	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
+	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+				 GFP_NOWAIT);

-	while ((cn = of_find_node_by_type(cn, "cpu"))) {
-		const u32 *rate, *reg;
+	for_each_possible_cpu(cpu) {
+		const __be32 *rate;
 		int len;

-		if (cpu >= num_possible_cpus())
-			break;
+		/* too early to use cpu->of_node */
+		cn = of_get_cpu_node(cpu, NULL);
+		if (!cn) {
+			pr_err("missing device node for CPU %d\n", cpu);
+			continue;
+		}
+
+		if (topology_parse_cpu_capacity(cn, cpu)) {
+			of_node_put(cn);
+			continue;
+		}
+
+		cap_from_dt = false;

 		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
 			if (of_device_is_compatible(cn, cpu_eff->compatible))
@@ -119,14 +121,7 @@ static void __init parse_dt_topology(void)

 		rate = of_get_property(cn, "clock-frequency", &len);
 		if (!rate || len != 4) {
-			pr_err("%s missing clock-frequency property\n",
-				cn->full_name);
-			continue;
-		}
-
-		reg = of_get_property(cn, "reg", &len);
-		if (!reg || len != 4) {
-			pr_err("%s missing reg property\n", cn->full_name);
+			pr_err("%pOF missing clock-frequency property\n", cn);
 			continue;
 		}

@@ -140,29 +135,25 @@ static void __init parse_dt_topology(void)
 		if (capacity > max_capacity)
 			max_capacity = capacity;

-		cpu_capacity[cpu].capacity = capacity;
-		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+		cpu_capacity(cpu) = capacity;
 	}

-	if (cpu < num_possible_cpus())
-		cpu_capacity[cpu].hwid = (unsigned long)(-1);
-
 	/* If min and max capacities are equals, we bypass the update of the
 	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
 	 * compute a middle_capacity factor that will ensure that the capacity
 	 * of an 'average' CPU of the system will be as close as possible to
-	 * SCHED_POWER_SCALE, which is the default value, but with the
+	 * SCHED_CAPACITY_SCALE, which is the default value, but with the
 	 * constraint explained near table_efficiency[].
 	 */
-	if (min_capacity == max_capacity)
-		cpu_capacity[0].hwid = (unsigned long)(-1);
-	else if (4*max_capacity < (3*(max_capacity + min_capacity)))
+	if (4*max_capacity < (3*(max_capacity + min_capacity)))
 		middle_capacity = (min_capacity + max_capacity)
-				>> (SCHED_POWER_SHIFT+1);
+				>> (SCHED_CAPACITY_SHIFT+1);
 	else
 		middle_capacity = ((max_capacity / 3)
-				>> (SCHED_POWER_SHIFT-1)) + 1;
+				>> (SCHED_CAPACITY_SHIFT-1)) + 1;
+
+	if (cap_from_dt)
+		topology_normalize_cpu_scale();
 }

 /*
@@ -170,70 +161,22 @@ static void __init parse_dt_topology(void)
  * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
  * function returns directly for SMP system.
  */
-void update_cpu_power(unsigned int cpu, unsigned long hwid)
+static void update_cpu_capacity(unsigned int cpu)
 {
-	unsigned int idx = 0;
-
-	/* look for the cpu's hwid in the cpu capacity table */
-	for (idx = 0; idx < num_possible_cpus(); idx++) {
-		if (cpu_capacity[idx].hwid == hwid)
-			break;
-
-		if (cpu_capacity[idx].hwid == -1)
-			return;
-	}
-
-	if (idx == num_possible_cpus())
+	if (!cpu_capacity(cpu) || cap_from_dt)
 		return;

-	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);

-	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
-		cpu, arch_scale_freq_power(NULL, cpu));
+	pr_info("CPU%u: update cpu_capacity %lu\n",
+		cpu, topology_get_cpu_scale(cpu));
 }

 #else
 static inline void parse_dt_topology(void) {}
-static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+static inline void update_cpu_capacity(unsigned int cpuid) {}
 #endif

- /*
- * cpu topology table
- */
-struct cputopo_arm cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
-	return &cpu_topology[cpu].core_sibling;
-}
-
-void update_siblings_masks(unsigned int cpuid)
-{
-	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
-	int cpu;
-
-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id != cpu_topo->socket_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
-		if (cpuid_topo->core_id != cpu_topo->core_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
-	}
-	smp_wmb();
-}
-
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -241,12 +184,11 @@ void update_siblings_masks(unsigned int cpuid)
  */
 void store_cpu_topology(unsigned int cpuid)
 {
-	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;

-	/* If the cpu topology has been already set, just return */
-	if (cpuid_topo->core_id != -1)
-		return;
+	if (cpuid_topo->package_id != -1)
+		goto topology_populated;

 	mpidr = read_cpuid_mpidr();

@@ -261,12 +203,12 @@ void store_cpu_topology(unsigned int cpuid)
 			/* core performance interdependency */
 			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
 		} else {
 			/* largely independent cores */
 			cpuid_topo->thread_id = -1;
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 		}
 	} else {
 		/*
@@ -276,17 +218,18 @@ void store_cpu_topology(unsigned int cpuid)
 		 */
 		cpuid_topo->thread_id = -1;
 		cpuid_topo->core_id = 0;
-		cpuid_topo->socket_id = -1;
+		cpuid_topo->package_id = -1;
 	}

-	update_siblings_masks(cpuid);
-
-	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
+	update_cpu_capacity(cpuid);

-	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+	pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
 		cpu_topology[cpuid].core_id,
-		cpu_topology[cpuid].socket_id, mpidr);
+		cpu_topology[cpuid].package_id, mpidr);
+
+topology_populated:
+	update_siblings_masks(cpuid);
 }

 /*
@@ -295,20 +238,7 @@ void store_cpu_topology(unsigned int cpuid)
  */
 void __init init_cpu_topology(void)
 {
-	unsigned int cpu;
-
-	/* init core mask and power*/
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
-
-		cpu_topo->thread_id = -1;
-		cpu_topo->core_id = -1;
-		cpu_topo->socket_id = -1;
-		cpumask_clear(&cpu_topo->core_sibling);
-		cpumask_clear(&cpu_topo->thread_sibling);
-
-		set_power_scale(cpu, SCHED_POWER_SCALE);
-	}
+	reset_cpu_topology();
 	smp_wmb();

 	parse_dt_topology();
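
A worked example helps follow the arithmetic that parse_dt_topology() and update_cpu_capacity() perform on the legacy clock-frequency path. The sketch below is a standalone userspace approximation, not kernel code: the clock rates are invented, and the "(rate >> 20) * efficiency" step reproduces the capacity computation that lives in context lines this diff does not show, so treat that step as an assumption.

/*
 * Minimal sketch of the legacy efficiency/clock-frequency capacity math.
 * Clock rates are made up; the raw-capacity formula is an approximation
 * of the kernel's, not a verbatim copy.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* SCHED_CAPACITY_SCALE == 1024 */

struct cpu {
	const char *compatible;
	unsigned long efficiency;	/* from table_efficiency[] */
	unsigned long rate_hz;		/* assumed clock-frequency */
	unsigned long capacity;
};

int main(void)
{
	struct cpu cpus[] = {
		{ "arm,cortex-a15", 3891, 2000000000UL, 0 },
		{ "arm,cortex-a7",  2048, 1000000000UL, 0 },
	};
	unsigned long min_capacity = -1UL, max_capacity = 0, middle_capacity;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		/* raw capacity ~ efficiency * clock rate scaled down by 2^20 */
		cpus[i].capacity = (cpus[i].rate_hz >> 20) * cpus[i].efficiency;
		if (cpus[i].capacity < min_capacity)
			min_capacity = cpus[i].capacity;
		if (cpus[i].capacity > max_capacity)
			max_capacity = cpus[i].capacity;
	}

	/* same branch structure as the end of parse_dt_topology() */
	if (4 * max_capacity < 3 * (max_capacity + min_capacity))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_CAPACITY_SHIFT + 1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_CAPACITY_SHIFT - 1)) + 1;

	for (i = 0; i < 2; i++)	/* what update_cpu_capacity() would set */
		printf("%s: cpu_scale = %lu\n", cpus[i].compatible,
		       cpus[i].capacity / middle_capacity);
	return 0;
}

With these made-up rates the 4*max < 3*(max+min) test fails, so the second branch fires and middle_capacity comes out as 4831; the Cortex-A15 ends up with cpu_scale 1535 and the Cortex-A7 with 404, just inside the 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2 window that the table_efficiency[] comment requires.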
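
The store_cpu_topology() hunks read three 8-bit affinity fields out of the MPIDR. A small decoding sketch follows; the macro body mirrors the MPIDR_AFFINITY_LEVEL() definition in arch/arm/include/asm/cputype.h, while the sample MPIDR value is invented.

/*
 * Sketch of the MPIDR slicing done by store_cpu_topology().
 * The sample register value is hypothetical.
 */
#include <stdio.h>

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

int main(void)
{
	unsigned int mpidr = 0x80000102;	/* invented: Aff2=0, Aff1=1, Aff0=2 */

	/* "largely independent cores" branch: level 0 is the core */
	printf("no MT:   core_id=%u package_id=%u\n",
	       MPIDR_AFFINITY_LEVEL(mpidr, 0),	/* 2 */
	       MPIDR_AFFINITY_LEVEL(mpidr, 1));	/* 1 */

	/* MT branch: level 0 is the thread, everything shifts up one level */
	printf("with MT: thread_id=%u core_id=%u package_id=%u\n",
	       MPIDR_AFFINITY_LEVEL(mpidr, 0),	/* 2 */
	       MPIDR_AFFINITY_LEVEL(mpidr, 1),	/* 1 */
	       MPIDR_AFFINITY_LEVEL(mpidr, 2));	/* 0 */
	return 0;
}

The multithreading bit of the MPIDR decides which interpretation applies: with it set, affinity level 0 numbers hardware threads and everything shifts up one level, which is exactly the difference between the two branches in the hunk above.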
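
Finally, when every CPU node carries a DT capacity value (cap_from_dt stays true), the new code defers to topology_normalize_cpu_scale() from drivers/base/arch_topology.c instead of the middle_capacity path. Below is a rough model of that normalization, assuming the generic code's behavior of scaling the largest raw value to SCHED_CAPACITY_SCALE; it is a simplification with invented property values, not a copy of the kernel implementation.

/*
 * Rough model of capacity normalization when values come from the
 * capacity-dmips-mhz DT property. Values are illustrative only.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024

int main(void)
{
	/* e.g. capacity-dmips-mhz = <1024> on big cores, <512> on LITTLE */
	unsigned long raw[] = { 1024, 1024, 512, 512 };
	unsigned long max_raw = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (raw[i] > max_raw)
			max_raw = raw[i];

	/* the biggest CPU lands at exactly SCHED_CAPACITY_SCALE */
	for (i = 0; i < 4; i++)
		printf("cpu%u: cpu_scale = %lu\n", i,
		       raw[i] * SCHED_CAPACITY_SCALE / max_raw);
	return 0;
}

topology_parse_cpu_capacity() is what collects those raw per-CPU values, which is why the loop in parse_dt_topology() skips the clock-frequency fallback for any CPU whose node provides one.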
