| author | Anup Patel <apatel@ventanamicro.com> | 2025-11-17 21:19:10 -0700 |
|---|---|---|
| committer | Paul Walmsley <pjw@kernel.org> | 2025-11-19 09:19:27 -0700 |
| commit | 231fb999a9acd17b1335e79f0fd6fc627353a6bc (patch) | |
| tree | 50d41eb63528974b6c6d669189ad109490cbeb1c | |
| parent | 4427259cc7f7571a157fbc9b5011e1ef6fe0a4a8 (diff) | |
RISC-V: Enable HOTPLUG_PARALLEL for secondary CPUs
The core kernel already supports parallel bringup of secondary
CPUs (aka HOTPLUG_PARALLEL). The x86 and MIPS architectures
already use HOTPLUG_PARALLEL, and ARM is also moving toward it.
On RISC-V, no arch-specific global data is accessed in the
secondary CPU bringup path, so enabling HOTPLUG_PARALLEL only
requires:
1) Providing a RISC-V implementation of arch_cpuhp_kick_ap_alive()
2) Calling cpuhp_ap_sync_alive() from smp_callin()
This patch has been tested natively with OpenSBI on a QEMU RV64 virt
machine with 64 cores, and also with a KVM RISC-V guest with 32 VCPUs.
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://patch.msgid.link/20250905122512.71684-1-apatel@ventanamicro.com
Signed-off-by: Paul Walmsley <pjw@kernel.org>
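To make the two requirements in the commit message more concrete, here is a minimal userspace model of the difference between serialized and parallel secondary-CPU bringup. This is an illustrative sketch only, not kernel code: `struct fake_cpu`, `kick_cpu_alive()`, `wait_cpu_alive()` and `secondary_start()` are hypothetical stand-ins for the idle-thread kick done by arch_cpuhp_kick_ap_alive() and for the "alive" report made via cpuhp_ap_sync_alive().

```c
/*
 * Illustrative userspace model of serialized vs. parallel secondary-CPU
 * bringup.  Not kernel code: kick_cpu_alive() and wait_cpu_alive() are
 * hypothetical stand-ins for arch_cpuhp_kick_ap_alive() and for the
 * control CPU consuming the "alive" report made by cpuhp_ap_sync_alive().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_CPUS 4

struct fake_cpu {
	int id;
	atomic_int alive;	/* set by the "secondary" once it has started */
	pthread_t thread;
};

static struct fake_cpu cpus[NR_CPUS];

/* Secondary entry point: pretend to do early setup, then report alive. */
static void *secondary_start(void *arg)
{
	struct fake_cpu *c = arg;

	usleep(1000 * (c->id + 1));	/* stand-in for firmware/startup latency */
	atomic_store(&c->alive, 1);	/* models the cpuhp_ap_sync_alive() report */
	return NULL;
}

/* Models arch_cpuhp_kick_ap_alive(): start the CPU but do not wait for it. */
static int kick_cpu_alive(struct fake_cpu *c)
{
	return pthread_create(&c->thread, NULL, secondary_start, c);
}

/* Models the control CPU waiting for a kicked CPU to report alive. */
static void wait_cpu_alive(struct fake_cpu *c)
{
	while (!atomic_load(&c->alive))
		;	/* the real kernel sleeps on a sync state instead of spinning */
	printf("cpu%d alive\n", c->id);
}

int main(void)
{
	int i;

	/* Parallel bringup: kick every secondary first... */
	for (i = 1; i < NR_CPUS; i++) {
		cpus[i].id = i;
		if (kick_cpu_alive(&cpus[i]))
			return 1;
	}

	/* ...then consume the "alive" handshakes, so startup latencies overlap. */
	for (i = 1; i < NR_CPUS; i++) {
		wait_cpu_alive(&cpus[i]);
		pthread_join(cpus[i].thread, NULL);
	}

	/*
	 * The legacy (!CONFIG_HOTPLUG_PARALLEL) flow corresponds to calling
	 * kick_cpu_alive() and wait_cpu_alive() back-to-back for each CPU,
	 * serializing those latencies.
	 */
	return 0;
}
```

Build with `cc -pthread model.c`. The legacy path kept under !CONFIG_HOTPLUG_PARALLEL in the diff below behaves like the serialized variant: __cpu_up() starts one CPU and waits on the cpu_running completion before the next CPU is started, whereas with HOTPLUG_PARALLEL the generic hotplug core kicks all secondaries before waiting, so their startup latencies can overlap.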
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/riscv/Kconfig | 2 |
| -rw-r--r-- | arch/riscv/kernel/smpboot.c | 15 |

2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index fadec20b87a8..34c68bb3b857 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -198,7 +198,7 @@ config RISCV
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+	select HOTPLUG_PARALLEL if HOTPLUG_CPU
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 601a321e0f17..d85916a3660c 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -39,7 +39,9 @@
 
 #include "head.h"
 
+#ifndef CONFIG_HOTPLUG_PARALLEL
 static DECLARE_COMPLETION(cpu_running);
+#endif
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
@@ -179,6 +181,12 @@ static int start_secondary_cpu(int cpu, struct task_struct *tidle)
 	return -EOPNOTSUPP;
 }
 
+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+	return start_secondary_cpu(cpu, tidle);
+}
+#else
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int ret = 0;
@@ -199,6 +207,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	return ret;
 }
+#endif
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
@@ -225,6 +234,10 @@ asmlinkage __visible void smp_callin(void)
 	mmgrab(mm);
 	current->active_mm = mm;
 
+#ifdef CONFIG_HOTPLUG_PARALLEL
+	cpuhp_ap_sync_alive();
+#endif
+
 	store_cpu_topology(curr_cpuid);
 	notify_cpu_starting(curr_cpuid);
 
@@ -243,7 +256,9 @@ asmlinkage __visible void smp_callin(void)
 	 */
 	local_flush_icache_all();
 	local_flush_tlb_all();
+#ifndef CONFIG_HOTPLUG_PARALLEL
 	complete(&cpu_running);
+#endif
 	/*
 	 * Disable preemption before enabling interrupts, so we don't try to
 	 * schedule a CPU that hasn't actually started yet.
