Diffstat (limited to 'arch/riscv/kernel/smpboot.c')
 arch/riscv/kernel/smpboot.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 18cda0e8cf94..eb533b5c2c8c 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -39,6 +39,7 @@
 
 void *__cpu_up_stack_pointer[NR_CPUS];
 void *__cpu_up_task_pointer[NR_CPUS];
+static DECLARE_COMPLETION(cpu_running);
 
 void __init smp_prepare_boot_cpu(void)
 {
@@ -50,12 +51,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 void __init setup_smp(void)
 {
-	struct device_node *dn = NULL;
+	struct device_node *dn;
 	int hart;
 	bool found_boot_cpu = false;
 	int cpuid = 1;
 
-	while ((dn = of_find_node_by_type(dn, "cpu"))) {
+	for_each_of_cpu_node(dn) {
 		hart = riscv_of_processor_hartid(dn);
 		if (hart < 0)
 			continue;
 
@@ -65,6 +66,11 @@ void __init setup_smp(void)
 			found_boot_cpu = 1;
 			continue;
 		}
+		if (cpuid >= NR_CPUS) {
+			pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
+				cpuid, hart);
+			break;
+		}
 
 		cpuid_to_hartid_map(cpuid) = hart;
 		set_cpu_possible(cpuid, true);
@@ -77,6 +83,7 @@ void __init setup_smp(void)
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
+	int ret = 0;
 	int hartid = cpuid_to_hartid_map(cpu);
 	tidle->thread_info.cpu = cpu;
 
@@ -92,10 +99,16 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 		  task_stack_page(tidle) + THREAD_SIZE);
 	WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
 
-	while (!cpu_online(cpu))
-		cpu_relax();
+	lockdep_assert_held(&cpu_running);
+	wait_for_completion_timeout(&cpu_running,
+				    msecs_to_jiffies(1000));
+
+	if (!cpu_online(cpu)) {
+		pr_crit("CPU%u: failed to come online\n", cpu);
+		ret = -EIO;
+	}
 
-	return 0;
+	return ret;
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -121,6 +134,7 @@ asmlinkage void __init smp_callin(void)
 	 * a local TLB flush right now just in case.
 	 */
 	local_flush_tlb_all();
+	complete(&cpu_running);
 	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
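
The setup_smp() hunks tighten CPU enumeration: devicetree hartids may be
sparse, so they are mapped onto dense logical cpuids, with cpuid 0 reserved
for the boot hart and the walk now capped at NR_CPUS. The following is a
minimal standalone sketch of that numbering scheme, not the kernel's actual
devicetree walk; the hartid array and boot_hartid value are made-up example
data.

/*
 * Sketch of setup_smp()'s logical CPU numbering: dense cpuids from
 * sparse hartids, cpuid 0 reserved for the boot hart, bounded by NR_CPUS.
 * Example data only -- the kernel reads hartids from the devicetree.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static int cpuid_to_hartid[NR_CPUS];

int main(void)
{
	/* Pretend the devicetree enumerated these harts; boot hart is 3. */
	const int harts[] = { 0, 1, 3, 5, 8, 13 };
	const int boot_hartid = 3;
	bool found_boot_cpu = false;
	int cpuid = 1;

	cpuid_to_hartid[0] = boot_hartid;	/* boot hart is always cpu 0 */

	for (unsigned int i = 0; i < sizeof(harts) / sizeof(harts[0]); i++) {
		int hart = harts[i];

		if (hart == boot_hartid) {
			found_boot_cpu = true;
			continue;
		}
		/* Mirrors the new NR_CPUS bound added by this diff. */
		if (cpuid >= NR_CPUS) {
			fprintf(stderr, "Invalid cpuid [%d] for hartid [%d]\n",
				cpuid, hart);
			break;
		}
		cpuid_to_hartid[cpuid++] = hart;
	}

	for (int c = 0; c < cpuid; c++)
		printf("cpuid %d -> hartid %d\n", c, cpuid_to_hartid[c]);
	printf("boot hart found: %s\n", found_boot_cpu ? "yes" : "no");
	return 0;
}

With NR_CPUS set to 4, harts 0, 1 and 5 get cpuids 1-3 and hart 8 triggers
the warning, just as an over-populated devicetree would after this change.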
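The heart of the __cpu_up() change is the cpu_running completion:
smp_callin() runs on the freshly booted hart and calls
complete(&cpu_running), while __cpu_up() sleeps in
wait_for_completion_timeout() instead of busy-spinning on cpu_online(),
and can now report -EIO if the hart never shows up. As a rough userspace
analogue of that handshake (not kernel code; fake_hart_main() and
wait_running_timeout() are invented names for illustration), this sketch
uses a pthread condition variable where the kernel uses a completion:

/*
 * Userspace analogue of the cpu_running handshake: one thread plays the
 * secondary hart and signals readiness; the "boot" thread waits with a
 * timeout and reports failure if the signal never arrives.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int done;			/* mirrors the completion's done count */

/* Plays the role of complete(&cpu_running) in smp_callin(). */
static void complete_running(void)
{
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* Plays the role of wait_for_completion_timeout() in __cpu_up(). */
static int wait_running_timeout(unsigned int msecs)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += msecs / 1000;
	ts.tv_nsec += (msecs % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!done && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);

	return done;			/* nonzero: the "hart" came up in time */
}

static void *fake_hart_main(void *arg)
{
	/* ... secondary "hart" initialization would happen here ... */
	complete_running();
	return NULL;
}

int main(void)
{
	pthread_t hart;

	pthread_create(&hart, NULL, fake_hart_main, NULL);
	if (!wait_running_timeout(1000))	/* same 1000 ms budget as the diff */
		fprintf(stderr, "CPU: failed to come online\n");
	else
		printf("CPU: online\n");
	pthread_join(hart, NULL);
	return 0;
}

Build with cc -pthread. The design point carried over from the diff is the
same either way: a sleeping wait with a deadline lets the boot path fail
gracefully with an error code, where the old cpu_online() spin loop would
hang the boot forever on a hart that never starts.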