author     Vladimir Kondratiev <vladimir.kondratiev@intel.com>    2019-11-24 16:07:31 +0200
committer  Paul Burton <paulburton@kernel.org>                    2019-11-26 10:33:08 -0800
commit     3b1313eb32c499d46dc4c3e896d19d9564c879c4 (patch)
tree       ae5ba376a2a50f3d814a23caa46d5bc0c5fca239 /arch/mips
parent     a8d0f11ee50ddbd9f243c7a8b1a393a4f23ba093 (diff)
mips: cacheinfo: report shared CPU map
Report L1 caches as shared per core, and the L2 cache as shared per cluster.
This fixes "perf", which went crazy when the shared_cpu_map attribute was not
reported in sysfs, in the form of:

    /sys/devices/system/cpu/cpu*/cache/index*/shared_cpu_list
    /sys/devices/system/cpu/cpu*/cache/index*/shared_cpu_map

Signed-off-by: Vladimir Kondratiev <vladimir.kondratiev@intel.com>
Signed-off-by: Paul Burton <paulburton@kernel.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
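For context (not part of the patch): a minimal userspace sketch of how a
consumer such as perf might read the shared_cpu_list attribute this change
populates. The sysfs paths come from the commit message; the helper name
print_shared_cpu_list, the error handling, and the fixed index range are
illustrative assumptions.

    /* Sketch only: read shared_cpu_list for a few cache levels of one CPU. */
    #include <stdio.h>

    static void print_shared_cpu_list(int cpu, int index)
    {
    	char path[128];
    	char buf[256];
    	FILE *f;

    	snprintf(path, sizeof(path),
    		 "/sys/devices/system/cpu/cpu%d/cache/index%d/shared_cpu_list",
    		 cpu, index);

    	f = fopen(path, "r");
    	if (!f)
    		return;	/* attribute missing: the situation this patch fixes */

    	if (fgets(buf, sizeof(buf), f))
    		printf("cpu%d index%d shared with CPUs: %s", cpu, index, buf);

    	fclose(f);
    }

    int main(void)
    {
    	/* e.g. L1 D/I (index0/index1) and L2 (index2) of CPU 0 */
    	for (int index = 0; index <= 2; index++)
    		print_shared_cpu_list(0, index);

    	return 0;
    }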
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/kernel/cacheinfo.c | 27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index f777e44653d5..47312c529410 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
 	return 0;
 }
 
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+
+	for_each_possible_cpu(cpu1)
+		if (cpus_are_siblings(cpu, cpu1))
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+	int cluster = cpu_cluster(&cpu_data[cpu]);
+
+	for_each_possible_cpu(cpu1)
+		if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
 	if (c->icache.waysize) {
+		/* L1 caches are per core */
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
 	} else {
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
 	}
 
-	if (c->scache.waysize)
+	if (c->scache.waysize) {
+		/* L2 cache is per cluster */
+		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+	}
 
 	if (c->tcache.waysize)
 		populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);