Diffstat (limited to 'arch/powerpc/kernel/setup-common.c')
-rw-r--r--	arch/powerpc/kernel/setup-common.c	85
1 file changed, 57 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9b142b9d5187..a08b0ede4e64 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -67,6 +67,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
#include <asm/mce.h>
+#include <asm/systemcfg.h>
#include "setup.h"
@@ -85,6 +86,7 @@ EXPORT_SYMBOL(machine_id);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
+int __initdata boot_core_hwid = -1;
#ifdef CONFIG_PPC64
int boot_cpu_hwid = -1;
@@ -109,7 +111,7 @@ int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_CRASH_DUMP
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif
@@ -404,13 +406,32 @@ static void __init cpu_init_thread_core_maps(int tpc)
cpumask_set_cpu(i, &threads_core_mask);
printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
- tpc, tpc > 1 ? "s" : "");
+ tpc, str_plural(tpc));
printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}
u32 *cpu_to_phys_id = NULL;
+static int assign_threads(unsigned int cpu, unsigned int nthreads, bool present,
+ const __be32 *hw_ids)
+{
+ for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {
+ __be32 hwid;
+
+ hwid = be32_to_cpu(hw_ids[i]);
+
+ DBG(" thread %d -> cpu %d (hard id %d)\n", i, cpu, hwid);
+
+ set_cpu_present(cpu, present);
+ set_cpu_possible(cpu, true);
+ cpu_to_phys_id[cpu] = hwid;
+ cpu++;
+ }
+
+ return cpu;
+}
+
/**
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_mask
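The hunk above factors the per-thread bookkeeping out of smp_setup_cpu_maps() into the new assign_threads() helper. The hardware IDs arrive as big-endian device-tree cells, hence the be32_to_cpu() conversion before they are recorded in cpu_to_phys_id[]. A standalone userspace sketch of the same walk (names and data are illustrative, not kernel API):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NR_CPU_IDS 4

	static uint32_t cpu_to_phys_id[NR_CPU_IDS];

	/* hw_ids points at big-endian device-tree cells. */
	static int assign_threads(unsigned int cpu, unsigned int nthreads,
				  const uint32_t *hw_ids)
	{
		for (unsigned int i = 0; i < nthreads && cpu < NR_CPU_IDS; i++) {
			uint32_t hwid = be32toh(hw_ids[i]);

			printf("  thread %u -> cpu %u (hard id %u)\n", i, cpu, hwid);
			cpu_to_phys_id[cpu++] = hwid;
		}
		return cpu;	/* next free logical CPU number */
	}

	int main(void)
	{
		/* One core with two threads, as the cells would sit in the blob. */
		uint32_t cells[] = { htobe32(0x20), htobe32(0x21) };

		assign_threads(0, 2, cells);
		return 0;
	}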
@@ -437,16 +458,13 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
+ cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32),
__alignof__(u32));
- if (!cpu_to_phys_id)
- panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
- __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
for_each_node_by_type(dn, "cpu") {
const __be32 *intserv;
__be32 cpu_be;
- int j, len;
+ int len;
DBG(" * %pOF...\n", dn);
@@ -468,27 +486,31 @@ void __init smp_setup_cpu_maps(void)
nthreads = len / sizeof(int);
- for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
- bool avail;
-
- DBG(" thread %d -> cpu %d (hard id %d)\n",
- j, cpu, be32_to_cpu(intserv[j]));
+ bool avail = of_device_is_available(dn);
+ if (!avail)
+ avail = !of_property_match_string(dn,
+ "enable-method", "spin-table");
- avail = of_device_is_available(dn);
- if (!avail)
- avail = !of_property_match_string(dn,
- "enable-method", "spin-table");
-
- set_cpu_present(cpu, avail);
- set_cpu_possible(cpu, true);
- cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
- cpu++;
- }
+ if (boot_core_hwid >= 0) {
+ if (cpu == 0) {
+ pr_info("Skipping CPU node %pOF to allow for boot core.\n", dn);
+ cpu = nthreads;
+ continue;
+ }
- if (cpu >= nr_cpu_ids) {
+ if (be32_to_cpu(intserv[0]) == boot_core_hwid) {
+ pr_info("Renumbered boot core %pOF to logical 0\n", dn);
+ assign_threads(0, nthreads, avail, intserv);
+ of_node_put(dn);
+ break;
+ }
+ } else if (cpu >= nr_cpu_ids) {
of_node_put(dn);
break;
}
+
+ if (cpu < nr_cpu_ids)
+ cpu = assign_threads(cpu, nthreads, avail, intserv);
}
/* If no SMT supported, nthreads is forced to 1 */
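Taken together, the boot_core_hwid branch implements the renumbering: the first CPU node encountered is skipped so that logical IDs 0..nthreads-1 stay free, every other node is numbered from nthreads upward, and the node whose first interrupt-server ID matches boot_core_hwid is assigned logical 0, after which the scan stops. A self-contained userspace mock-up of that policy (the node list, NTHREADS and the hardware IDs are invented for illustration):

	#include <stdio.h>

	#define NR_CPUS  8
	#define NTHREADS 2			/* threads per core, assumed uniform */

	int main(void)
	{
		int hw_first[] = { 0x00, 0x08, 0x10 };	/* first hw id of each CPU node */
		int boot_core_hwid = 0x10;		/* pretend we booted on core 0x10 */
		int logical[NR_CPUS] = { 0 };
		int cpu = 0;

		for (int n = 0; n < 3; n++) {
			if (cpu == 0) {
				/* Reserve logical 0..NTHREADS-1 for the boot core. */
				printf("skipping node %d to allow for boot core\n", n);
				cpu = NTHREADS;
				continue;
			}
			if (hw_first[n] == boot_core_hwid) {
				/* Renumber the boot core to logical 0, stop scanning. */
				for (int t = 0; t < NTHREADS; t++)
					logical[t] = hw_first[n] + t;
				break;
			}
			for (int t = 0; t < NTHREADS && cpu < NR_CPUS; t++)
				logical[cpu++] = hw_first[n] + t;
		}

		for (int i = 0; i < cpu; i++)
			printf("logical %d -> hw id %#x\n", i, logical[i]);
		return 0;
	}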
@@ -536,7 +558,9 @@ void __init smp_setup_cpu_maps(void)
out:
of_node_put(dn);
}
- vdso_data->processorCount = num_present_cpus();
+#endif
+#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
+ systemcfg->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
/* Initialize CPU <=> thread mapping
@@ -616,6 +640,8 @@ static __init void probe_machine(void)
DBG(" %s ...\n", machine_id->name);
if (machine_id->compatible && !of_machine_is_compatible(machine_id->compatible))
continue;
+ if (machine_id->compatibles && !of_machine_compatible_match(machine_id->compatibles))
+ continue;
memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
if (ppc_md.probe && !ppc_md.probe())
continue;
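The new .compatibles member lets a machine description match any entry of a NULL-terminated string list, where the older .compatible field takes a single string; of_machine_compatible_match() tests the root node against the whole list. A hypothetical machine description using it (board and symbol names invented):

	static const char * const xyz_compatibles[] = {
		"vendor,board-rev-a",
		"vendor,board-rev-b",
		NULL
	};

	define_machine(xyz) {
		.name        = "XYZ",
		.compatibles = xyz_compatibles,
		/* .probe, .setup_arch, ... as usual */
	};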
@@ -805,8 +831,8 @@ static int __init check_cache_coherency(void)
if (devtree_coherency != KERNEL_COHERENCY) {
printk(KERN_ERR
"kernel coherency:%s != device tree_coherency:%s\n",
- KERNEL_COHERENCY ? "on" : "off",
- devtree_coherency ? "on" : "off");
+ str_on_off(KERNEL_COHERENCY),
+ str_on_off(devtree_coherency));
BUG();
}
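str_plural() and str_on_off() come from <linux/string_choices.h> and replace the open-coded ternaries in both hunks above. Their behaviour is easy to mirror in userspace (a re-implementation for illustration, not the kernel header itself):

	#include <stdio.h>

	static const char *str_plural(long num) { return num == 1 ? "" : "s"; }
	static const char *str_on_off(int v)    { return v ? "on" : "off"; }

	int main(void)
	{
		for (int tpc = 1; tpc <= 2; tpc++)
			printf("CPU maps initialized for %d thread%s per core\n",
			       tpc, str_plural(tpc));
		printf("kernel coherency:%s\n", str_on_off(1));
		return 0;
	}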
@@ -933,6 +959,7 @@ void __init setup_arch(char **cmdline_p)
mem_topology_setup();
/* Set max_mapnr before paging_init() */
set_max_mapnr(max_pfn);
+ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
/*
* Release secondary cpus out of their spinloops at 0x60 now that
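high_memory marks the first virtual address past the direct-mapped low memory, and __va() translates the physical end of lowmem into the linear map. A worked example with made-up numbers:

	/* Hypothetical layout: 4 KiB pages and max_low_pfn = 0x80000,
	 * i.e. 2 GiB of direct-mapped RAM:
	 *
	 *   high_memory = __va(0x80000 * 0x1000)
	 *               = PAGE_OFFSET + 0x80000000
	 *
	 * Addresses at or above this point are outside the lowmem
	 * linear mapping.
	 */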
@@ -970,9 +997,11 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
/*
- * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
- * be called after initmem_init(), so that pageblock_order is initialised.
+ * Reserve large chunks of memory for use by CMA for fadump, KVM and
+ * hugetlb. These must be called after initmem_init(), so that
+ * pageblock_order is initialised.
*/
+ fadump_cma_init();
kvm_cma_reserve();
gigantic_hugetlb_cma_reserve();
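All three reservations go through CMA, whose minimum alignment is derived from pageblock_order; that is the ordering constraint the updated comment spells out. A rough note on the dependency (simplified; see CMA_MIN_ALIGNMENT_BYTES and cma_declare_contiguous() in the kernel for the real checks):

	/* CMA regions must be aligned to
	 *   CMA_MIN_ALIGNMENT_BYTES = PAGE_SIZE << pageblock_order,
	 * so pageblock_order must already be set up by initmem_init()
	 * before fadump_cma_init(), kvm_cma_reserve() or
	 * gigantic_hugetlb_cma_reserve() can place their regions.
	 */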