From 830eec24673a982bff4df85ba4d17e4a6ff201a7 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Tue, 11 Dec 2018 22:23:22 +0100
Subject: ARM: versatile: convert boot_lock to raw

The arm boot_lock is used by the secondary processor startup code.  The
locking task is the idle thread, which has idle->sched_class ==
&idle_sched_class.  idle_sched_class->enqueue_task == NULL, so if the idle
task blocks on the lock, the attempt to wake it when the lock becomes
available will fail:

try_to_wake_up()
   ...
      activate_task()
         enqueue_task()
            p->sched_class->enqueue_task(rq, p, flags)

Fix by converting boot_lock to a raw spin lock.

Cc: Linus Walleij
Signed-off-by: Frank Rowand
Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com
Signed-off-by: Thomas Gleixner
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Russell King
---
 arch/arm/plat-versatile/platsmp.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
(limited to 'arch/arm/plat-versatile/platsmp.c')

diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index c2366510187a..6b60f582b738 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -32,7 +32,7 @@ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 void versatile_secondary_init(unsigned int cpu)
 {
@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * This is really belt and braces; we hold unintended secondary
@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
--
cgit
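The conversion above hinges on a property of the two lock types that the log
only implies: a spinlock_t can be turned into a sleeping lock (notably under
PREEMPT_RT), while a raw_spinlock_t always busy-waits, so it remains safe for
the idle thread, which cannot be put to sleep and woken like an ordinary task.
A minimal illustrative sketch of the two APIs follows; the lock names and the
function are invented for this example and are not part of the patch.

#include <linux/spinlock.h>

/* Hypothetical locks, for illustration only. */
static DEFINE_SPINLOCK(example_lock);          /* may become a sleeping lock */
static DEFINE_RAW_SPINLOCK(example_raw_lock);  /* always spins, never sleeps */

static void example_critical_sections(void)
{
        /* Ordinary task context: blocking here is acceptable. */
        spin_lock(&example_lock);
        spin_unlock(&example_lock);

        /*
         * Context that must never block, e.g. the idle thread during
         * secondary CPU bring-up: only the raw variant is safe here.
         */
        raw_spin_lock(&example_raw_lock);
        raw_spin_unlock(&example_raw_lock);
}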
From d9b778e7bf2d37251d7ce365b39ebe86332c7a10 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 13 Dec 2018 12:54:26 +0000
Subject: ARM: versatile: rename and comment SMP implementation

Rename pen_release and boot_lock in the Versatile specific SMP
implementation, describe why these exist and state clearly that they
should not be used in production implementations.

Signed-off-by: Russell King
---
 arch/arm/plat-versatile/platsmp.c | 47 +++++++++++++++++++++++++++------------
 1 file changed, 33 insertions(+), 14 deletions(-)
(limited to 'arch/arm/plat-versatile/platsmp.c')

diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 6b60f582b738..6e2836243187 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -7,6 +7,11 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * This code is specific to the hardware found on ARM Realview and
+ * Versatile Express platforms where the CPUs are unable to be individually
+ * woken, and where there is no way to hot-unplug CPUs. Real platforms
+ * should not copy this code.
  */
 #include
 #include
@@ -21,18 +26,32 @@
 #include
 
 /*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
+ * versatile_cpu_release controls the release of CPUs from the holding
+ * pen in headsmp.S, which exists because we are not always able to
+ * control the release of individual CPUs from the board firmware.
+ * Production platforms do not need this.
+ */
+volatile int versatile_cpu_release = -1;
+
+/*
+ * Write versatile_cpu_release in a way that is guaranteed to be visible to
+ * all observers, irrespective of whether they're taking part in coherency
  * or not. This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void versatile_write_cpu_release(int val)
 {
-	pen_release = val;
+	versatile_cpu_release = val;
 	smp_wmb();
-	sync_cache_w(&pen_release);
+	sync_cache_w(&versatile_cpu_release);
 }
 
-static DEFINE_RAW_SPINLOCK(boot_lock);
+/*
+ * versatile_lock exists to avoid running the loops_per_jiffy delay loop
+ * calibrations on the secondary CPU while the requesting CPU is using
+ * the limited-bandwidth bus - which affects the calibration value.
+ * Production platforms do not need this.
+ */
+static DEFINE_RAW_SPINLOCK(versatile_lock);
 
 void versatile_secondary_init(unsigned int cpu)
 {
@@ -40,13 +59,13 @@ void versatile_secondary_init(unsigned int cpu)
 	 * let the primary processor know we're out of the
 	 * pen, then head off into the C entry point
 	 */
-	write_pen_release(-1);
+	versatile_write_cpu_release(-1);
 
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	raw_spin_lock(&boot_lock);
-	raw_spin_unlock(&boot_lock);
+	raw_spin_lock(&versatile_lock);
+	raw_spin_unlock(&versatile_lock);
 }
 
 int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -57,7 +76,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	raw_spin_lock(&boot_lock);
+	raw_spin_lock(&versatile_lock);
 
 	/*
 	 * This is really belt and braces; we hold unintended secondary
@@ -65,7 +84,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * since we haven't sent them a soft interrupt, they shouldn't
 	 * be there.
 	 */
-	write_pen_release(cpu_logical_map(cpu));
+	versatile_write_cpu_release(cpu_logical_map(cpu));
 
 	/*
 	 * Send the secondary CPU a soft interrupt, thereby causing
@@ -77,7 +96,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	timeout = jiffies + (1 * HZ);
 	while (time_before(jiffies, timeout)) {
 		smp_rmb();
-		if (pen_release == -1)
+		if (versatile_cpu_release == -1)
 			break;
 
 		udelay(10);
@@ -87,7 +106,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	raw_spin_unlock(&boot_lock);
+	raw_spin_unlock(&versatile_lock);
 
-	return pen_release != -1 ? -ENOSYS : 0;
+	return versatile_cpu_release != -1 ? -ENOSYS : 0;
 }
--
cgit
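For readers new to the holding-pen scheme this second patch documents: the boot
CPU publishes the logical ID of the CPU it wants to release, the secondary
spins until it sees its own ID, then writes -1 back to acknowledge, while the
boot CPU waits (with a timeout) for that acknowledgement. A rough userspace
analogue of the handshake is sketched below; every name in it is invented for
illustration, and the real secondaries spin in assembly in headsmp.S, not in a
thread.

/* build: cc -pthread pen_demo.c -o pen_demo */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpu_release = -1;     /* -1 means "stay in the pen" */

static void *secondary(void *arg)
{
        int my_id = *(int *)arg;

        /* Spin in the "pen" until the boot thread publishes our ID. */
        while (atomic_load(&cpu_release) != my_id)
                ;

        printf("cpu %d released from the holding pen\n", my_id);

        /* Acknowledge the release so the boot thread stops waiting. */
        atomic_store(&cpu_release, -1);
        return NULL;
}

int main(void)
{
        pthread_t t;
        int id = 1;

        pthread_create(&t, NULL, secondary, &id);

        atomic_store(&cpu_release, id); /* "release" CPU 1 */

        /* The kernel bounds this wait with a jiffies-based timeout. */
        while (atomic_load(&cpu_release) != -1)
                ;

        pthread_join(t, NULL);
        return 0;
}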