path: root/arch/riscv/kernel/smpboot.c
Diffstat (limited to 'arch/riscv/kernel/smpboot.c')
-rw-r--r--  arch/riscv/kernel/smpboot.c  35
1 file changed, 27 insertions, 8 deletions
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 1319b29ce3b5..d85916a3660c 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -39,7 +39,9 @@
#include "head.h"
+#ifndef CONFIG_HOTPLUG_PARALLEL
static DECLARE_COMPLETION(cpu_running);
+#endif
void __init smp_prepare_cpus(unsigned int max_cpus)
{
@@ -96,7 +98,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
        if (hart == cpuid_to_hartid_map(0)) {
                BUG_ON(found_boot_cpu);
                found_boot_cpu = true;
-               early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count));
                return 0;
        }
@@ -106,7 +107,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
        }
        cpuid_to_hartid_map(cpu_count) = hart;
-       early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count));
        cpu_count++;
        return 0;
@@ -181,6 +181,12 @@ static int start_secondary_cpu(int cpu, struct task_struct *tidle)
        return -EOPNOTSUPP;
}
+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+       return start_secondary_cpu(cpu, tidle);
+}
+#else
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int ret = 0;
@@ -201,6 +207,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        return ret;
}
+#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
@@ -214,24 +221,34 @@ asmlinkage __visible void smp_callin(void)
        struct mm_struct *mm = &init_mm;
        unsigned int curr_cpuid = smp_processor_id();
+       if (has_vector()) {
+               /*
+                * Return as early as possible so the hart with a mismatching
+                * vlen won't boot.
+                */
+               if (riscv_v_setup_vsize())
+                       return;
+       }
+
        /* All kernel threads share the same mm context. */
        mmgrab(mm);
        current->active_mm = mm;
+#ifdef CONFIG_HOTPLUG_PARALLEL
+       cpuhp_ap_sync_alive();
+#endif
+
        store_cpu_topology(curr_cpuid);
        notify_cpu_starting(curr_cpuid);
        riscv_ipi_enable();
        numa_add_cpu(curr_cpuid);
-       set_cpu_online(curr_cpuid, true);
-       if (has_vector()) {
-               if (riscv_v_setup_vsize())
-                       elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
-       }
+       pr_debug("CPU%u: Booted secondary hartid %lu\n", curr_cpuid,
+                cpuid_to_hartid_map(curr_cpuid));
-       riscv_user_isa_enable();
+       set_cpu_online(curr_cpuid, true);
        /*
         * Remote cache and TLB flushes are ignored while the CPU is offline,
@@ -239,7 +256,9 @@ asmlinkage __visible void smp_callin(void)
         */
        local_flush_icache_all();
        local_flush_tlb_all();
+#ifndef CONFIG_HOTPLUG_PARALLEL
        complete(&cpu_running);
+#endif
        /*
         * Disable preemption before enabling interrupts, so we don't try to
         * schedule a CPU that hasn't actually started yet.