path: root/arch/arm64/kernel/smp.c
author	Mark Rutland <mark.rutland@arm.com>	2021-05-20 12:50:31 +0100
committer	Will Deacon <will@kernel.org>	2021-05-26 22:45:46 +0100
commit	3d8c1a013d78f32ee266097496cbd89b734b5fcb (patch)
tree	0d3c3057812148a28af6ba503a533e3acc7f9630 /arch/arm64/kernel/smp.c
parent	8e334d729bc4787f728e9e5abc91649f131124ff (diff)
arm64: smp: initialize cpu offset earlier
Now that we have a consistent place to initialize CPU context registers
early in the boot path, let's also initialize the per-cpu offset here.

This makes the primary and secondary boot paths more consistent, and
allows for the use of per-cpu operations earlier, which will be
necessary for instrumentation with KCSAN.

Note that smp_prepare_boot_cpu() still needs to re-initialize CPU0's
offset, as immediately prior to this the per-cpu areas may be
reallocated, and hence the boot-time offset may be stale. A comment is
added to make this clear.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210520115031.18509-7-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
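For reference, the helpers this patch relies on look roughly like the sketch
below. This is an abbreviated, illustrative version based on
include/asm-generic/percpu.h and arch/arm64/include/asm/percpu.h; the real
set_my_cpu_offset() selects TPIDR_EL2 via an ALTERNATIVE on VHE systems,
which is omitted here.

/* Runtime per-cpu offsets, populated by setup_per_cpu_areas(). */
extern unsigned long __per_cpu_offset[];
#define per_cpu_offset(cpu)	(__per_cpu_offset[cpu])

/*
 * arm64 stashes the current CPU's offset in a thread-ID register
 * (TPIDR_EL1 here; TPIDR_EL2 when running with VHE). The per-cpu
 * accessors read it back to locate this CPU's per-cpu area, so per-cpu
 * operations only work once this register has been initialized.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
}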
Diffstat (limited to 'arch/arm64/kernel/smp.c')
-rw-r--r--	arch/arm64/kernel/smp.c	10
1 file changed, 6 insertions, 4 deletions
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 73625cc39574..2fe8fab886e2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -198,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	const struct cpu_operations *ops;
-	unsigned int cpu;
-
-	cpu = task_cpu(current);
-	set_my_cpu_offset(per_cpu_offset(cpu));
+	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
@@ -448,6 +445,11 @@ void __init smp_cpus_done(unsigned int max_cpus)

void __init smp_prepare_boot_cpu(void)
{
+	/*
+	 * The runtime per-cpu areas have been allocated by
+	 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+	 * freed shortly, so we must move over to the runtime per-cpu area.
+	 */
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
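
The comment added above matches the ordering in the generic boot code:
setup_per_cpu_areas() runs immediately before smp_prepare_boot_cpu(), so an
offset established earlier may point at the soon-to-be-freed boot-time area.
A rough, abbreviated sketch of the relevant calls in
init/main.c:start_kernel() (not verbatim):

asmlinkage __visible void __init start_kernel(void)
{
	/* ... */
	setup_per_cpu_areas();	/* runtime per-cpu areas allocated here */
	smp_prepare_boot_cpu();	/* CPU0 reloads its per-cpu offset */
	/* ... */
}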