Diffstat (limited to 'arch/riscv/kernel/smpboot.c')
-rw-r--r--  arch/riscv/kernel/smpboot.c  85
1 file changed, 43 insertions(+), 42 deletions(-)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index f4d6acb38dd0..d85916a3660c 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -25,6 +25,8 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
+
+#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -37,16 +39,13 @@
#include "head.h"
+#ifndef CONFIG_HOTPLUG_PARALLEL
static DECLARE_COMPLETION(cpu_running);
-
-void __init smp_prepare_boot_cpu(void)
-{
-}
+#endif
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpuid;
- int ret;
unsigned int curr_cpuid;
init_cpu_topology();
@@ -63,11 +62,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for_each_possible_cpu(cpuid) {
if (cpuid == curr_cpuid)
continue;
- if (cpu_ops[cpuid]->cpu_prepare) {
- ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
- if (ret)
- continue;
- }
set_cpu_present(cpuid, true);
numa_store_cpu_info(cpuid);
}
@@ -104,7 +98,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const unsigned long end)
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = true;
- early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count));
return 0;
}
@@ -114,7 +107,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const unsigned long end)
}
cpuid_to_hartid_map(cpu_count) = hart;
- early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count));
cpu_count++;
return 0;
@@ -122,18 +114,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const unsigned long end)
static void __init acpi_parse_and_init_cpus(void)
{
- int cpuid;
-
- cpu_set_ops(0);
-
acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
-
- for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
- if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
- cpu_set_ops(cpuid);
- set_cpu_possible(cpuid, true);
- }
- }
}
#else
#define acpi_parse_and_init_cpus(...) do { } while (0)
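The stubbed-out #define above is the usual statement-macro idiom: when ACPI support is compiled out, acpi_parse_and_init_cpus() must still behave like a single C statement wherever it is called, including under an un-braced if/else. A minimal standalone sketch of why do { } while (0) is used (the feature_init name is illustrative, not from this patch):

#include <stdio.h>

#ifdef CONFIG_FEATURE
static void feature_init(void) { puts("feature up"); }
#else
/* Expands to exactly one statement, so an un-braced if/else stays legal. */
#define feature_init(...) do { } while (0)
#endif

int main(void)
{
	int have_feature = 0;

	if (have_feature)
		feature_init();		/* still parses as one statement */
	else
		puts("feature compiled out");
	return 0;
}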
@@ -147,8 +128,6 @@ static void __init of_parse_and_init_cpus(void)
int cpuid = 1;
int rc;
- cpu_set_ops(0);
-
for_each_of_cpu_node(dn) {
rc = riscv_early_of_processor_hartid(dn, &hart);
if (rc < 0)
@@ -176,31 +155,38 @@ static void __init of_parse_and_init_cpus(void)
if (cpuid > nr_cpu_ids)
pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
cpuid, nr_cpu_ids);
-
- for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
- if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
- cpu_set_ops(cpuid);
- set_cpu_possible(cpuid, true);
- }
- }
}
void __init setup_smp(void)
{
+ int cpuid;
+
+ cpu_set_ops();
+
if (acpi_disabled)
of_parse_and_init_cpus();
else
acpi_parse_and_init_cpus();
+
+ for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
+ if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+ set_cpu_possible(cpuid, true);
}
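The rewritten setup_smp() reflects the core of this change: the kernel no longer keeps a cpu_ops[] entry per CPU, so cpu_set_ops() is called exactly once and every hart is started through the same shared operations structure. A rough userspace sketch of that consolidation, assuming only that cpu_ops is now a single global pointer (the struct layout and "sbi" names below are illustrative, not copied from the kernel):

#include <stdio.h>

/* Illustrative stand-in for the kernel's struct cpu_operations. */
struct cpu_operations {
	const char *name;
	int (*cpu_start)(unsigned int cpu);
};

static int sbi_cpu_start(unsigned int cpu)
{
	printf("starting cpu %u via sbi\n", cpu);
	return 0;
}

/* Before: one ops pointer per CPU.  After: one shared pointer for all. */
static const struct cpu_operations sbi_ops = {
	.name		= "sbi",
	.cpu_start	= sbi_cpu_start,
};
static const struct cpu_operations *cpu_ops;

static void cpu_set_ops(void)
{
	cpu_ops = &sbi_ops;	/* chosen once, used for every hart */
}

int main(void)
{
	cpu_set_ops();
	for (unsigned int cpu = 1; cpu < 4; cpu++)
		if (cpu_ops->cpu_start)
			cpu_ops->cpu_start(cpu);
	return 0;
}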
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
- if (cpu_ops[cpu]->cpu_start)
- return cpu_ops[cpu]->cpu_start(cpu, tidle);
+ if (cpu_ops->cpu_start)
+ return cpu_ops->cpu_start(cpu, tidle);
return -EOPNOTSUPP;
}
+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+ return start_secondary_cpu(cpu, tidle);
+}
+#else
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = 0;
@@ -221,6 +207,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
return ret;
}
+#endif
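Under CONFIG_HOTPLUG_PARALLEL the generic hotplug core drives the bring-up handshake itself: the architecture only kicks the AP from arch_cpuhp_kick_ap_alive(), and the AP answers through cpuhp_ap_sync_alive(), which is why the private cpu_running completion is now compiled only for the serial __cpu_up() path. A pthread analogue of that serial handshake, a minimal sketch rather than the kernel API (struct completion here is a hand-rolled stand-in for DECLARE_COMPLETION/complete/wait_for_completion):

#include <pthread.h>
#include <stdio.h>

/* Illustrative one-shot completion: a flag guarded by a mutex/condvar. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static struct completion cpu_running = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* The "secondary CPU": finishes its setup, then reports in. */
static void *secondary(void *arg)
{
	puts("secondary: came online");
	complete(&cpu_running);		/* the serial smp_callin() step */
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, secondary, NULL);
	wait_for_completion(&cpu_running);	/* __cpu_up() waits here */
	puts("boot cpu: secondary is up");
	pthread_join(&ap, NULL);
	return 0;
}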
void __init smp_cpus_done(unsigned int max_cpus)
{
@@ -234,30 +221,44 @@ asmlinkage __visible void smp_callin(void)
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
+ if (has_vector()) {
+ /*
+ * Return as early as possible so the hart with a mismatching
+ * vlen won't boot.
+ */
+ if (riscv_v_setup_vsize())
+ return;
+ }
+
/* All kernel threads share the same mm context. */
mmgrab(mm);
current->active_mm = mm;
+#ifdef CONFIG_HOTPLUG_PARALLEL
+ cpuhp_ap_sync_alive();
+#endif
+
store_cpu_topology(curr_cpuid);
notify_cpu_starting(curr_cpuid);
riscv_ipi_enable();
numa_add_cpu(curr_cpuid);
- set_cpu_online(curr_cpuid, 1);
- probe_vendor_features(curr_cpuid);
- if (has_vector()) {
- if (riscv_v_setup_vsize())
- elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
- }
+ pr_debug("CPU%u: Booted secondary hartid %lu\n", curr_cpuid,
+ cpuid_to_hartid_map(curr_cpuid));
+
+ set_cpu_online(curr_cpuid, true);
/*
- * Remote TLB flushes are ignored while the CPU is offline, so emit
- * a local TLB flush right now just in case.
+ * Remote cache and TLB flushes are ignored while the CPU is offline,
+ * so flush them both right now just in case.
*/
+ local_flush_icache_all();
local_flush_tlb_all();
+#ifndef CONFIG_HOTPLUG_PARALLEL
complete(&cpu_running);
+#endif
/*
* Disable preemption before enabling interrupts, so we don't try to
* schedule a CPU that hasn't actually started yet.
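The reordered smp_callin() in the final hunk encodes two ordering rules worth noting: a hart whose vector length does not match returns before it can ever be marked online, and the local icache/TLB flushes sit right next to set_cpu_online() because remote flush requests were silently dropped while the hart was offline. A compact pthread sketch of the first rule, rejecting a worker before it is published to the rest of the system (all names and the mismatch condition are illustrative, not taken from the patch):

#include <pthread.h>
#include <stdio.h>

/* Illustrative capability probe: stand-in for riscv_v_setup_vsize()
 * failing on a hart whose vlen differs from the boot hart's. */
static int probe_capability(unsigned int cpu)
{
	return cpu == 2 ? -1 : 0;	/* pretend cpu 2 mismatches */
}

static int online_mask;			/* stand-in for the online cpumask */
static pthread_mutex_t mask_lock = PTHREAD_MUTEX_INITIALIZER;

static void *callin(void *arg)
{
	unsigned int cpu = (unsigned int)(unsigned long)arg;

	/* Bail out before publishing, mirroring the early return. */
	if (probe_capability(cpu)) {
		printf("cpu %u: capability mismatch, not booting\n", cpu);
		return NULL;
	}

	pthread_mutex_lock(&mask_lock);
	online_mask |= 1 << cpu;	/* only now is the cpu visible */
	pthread_mutex_unlock(&mask_lock);
	printf("cpu %u: online\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	for (unsigned long cpu = 1; cpu <= 3; cpu++)
		pthread_create(&t[cpu - 1], NULL, callin, (void *)cpu);
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	printf("online mask: 0x%x\n", online_mask);
	return 0;
}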