/*
 * arch/parisc/kernel/topology.c
 *
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 *
 * based on arch/arm/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/cpu.h>

#include <asm/topology.h>
#include <asm/sections.h>

static DEFINE_PER_CPU(struct cpu, cpu_devices);

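/* set when two cpus sharing the same physical package (cpu_loc) are found */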
static int dualcores_found;

/*
 * store_cpu_topology is called at boot when only one cpu is running, and
 * with the cpu_hotplug.lock mutex held once several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	struct cpuinfo_parisc *p;
	int max_socket = -1;
	unsigned long cpu;

	/* If the cpu topology has already been set, just return */
	if (cpuid_topo->core_id != -1)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	per_cpu(cpu_devices, cpuid).hotpluggable = 1;
#endif
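	/* register the cpu device so it shows up under /sys/devices/system/cpu */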
	if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
		pr_warn("Failed to register CPU%d device\n", cpuid);

	/* create cpu topology mapping */
	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = 0;

	p = &per_cpu(cpu_data, cpuid);
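	/*
	 * Scan the cpus that are already online: a cpu sharing our
	 * (non-zero) cpu_loc is another core of the same physical package.
	 */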
	for_each_online_cpu(cpu) {
		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

		if (cpu == cpuid) /* ignore current cpu */
			continue;

		if (cpuinfo->cpu_loc == p->cpu_loc) {
			cpuid_topo->core_id = cpu_topology[cpu].core_id;
			if (p->cpu_loc) {
				/* another core of the same physical package */
				cpuid_topo->core_id++;
				cpuid_topo->package_id = cpu_topology[cpu].package_id;
				dualcores_found = 1;
				continue;
			}
		}

		/* track the highest package_id seen among unrelated cpus */
		if (cpuid_topo->package_id == -1)
			max_socket = max(max_socket, cpu_topology[cpu].package_id);
	}

	/* no sibling in an existing package was found: open a new package */
	if (cpuid_topo->package_id == -1)
		cpuid_topo->package_id = max_socket + 1;

	/* recompute the sibling masks now that core/package ids are set */
	update_siblings_masks(cpuid);

	pr_info("CPU%u: cpu core %d of socket %d\n",
		cpuid,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].package_id);
}

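/*
 * Scheduler topology used when at least one dual-core package was found:
 * group cores of the same package at the MC level below the DIE level.
 */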
static struct sched_domain_topology_level parisc_mc_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif

	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	/* Set scheduler topology descriptor */
	if (dualcores_found)
		set_sched_topology(parisc_mc_topology);
}