author:    Uros Bizjak <ubizjak@gmail.com>  2022-08-25 16:56:03 +0200
committer: Andrew Morton <akpm@linux-foundation.org>  2022-09-11 21:55:10 -0700
commit:    9a15193e23b780d1da77e3db18698beb0637897d (patch)
tree:      789af806d1dfa280b4c6ef05d8dd5038fe855b89 /kernel/smpboot.c
parent:    5fdfa161b2043001f82cbce49e87e8e9f581d510 (diff)
smpboot: use atomic_try_cmpxchg in cpu_wait_death and cpu_report_death
Use atomic_try_cmpxchg instead of the atomic_cmpxchg(*ptr, old, new) == old pattern in cpu_wait_death and cpu_report_death. On x86, the CMPXCHG instruction returns success in the ZF flag, so this change saves a compare after cmpxchg (and the related move instruction in front of cmpxchg). Also, atomic_try_cmpxchg implicitly assigns the old *ptr value to "old" when cmpxchg fails, enabling further code simplifications.

No functional change intended.

Link: https://lkml.kernel.org/r/20220825145603.5811-1-ubizjak@gmail.com
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
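For reference, the idiom this patch adopts can be shown with a small standalone sketch. The example below uses GCC's __atomic_compare_exchange_n builtin, which behaves like the kernel's atomic_try_cmpxchg in the way that matters here: it returns a boolean and, on failure, writes the value it actually observed back into the "expected" argument. The state constants, the report_death() helper, and main() are illustrative stand-ins, not kernel code.

/* Minimal userspace sketch of the try_cmpxchg loop idiom (illustrative only). */
/* Build: gcc -O2 -o sketch sketch.c */
#include <stdbool.h>
#include <stdio.h>

/* Illustrative states, not the kernel's CPU hotplug constants. */
enum { STATE_ONLINE, STATE_DEAD, STATE_DEAD_FROZEN, STATE_BROKEN };

static int state = STATE_ONLINE;

static int report_death(void)
{
        /* Read the current value once, before entering the loop. */
        int old = __atomic_load_n(&state, __ATOMIC_RELAXED);
        int new;

        do {
                new = (old != STATE_BROKEN) ? STATE_DEAD : STATE_DEAD_FROZEN;
                /*
                 * On failure the builtin updates "old" with the value it saw,
                 * so the loop needs no explicit re-read and no "!= old" check.
                 */
        } while (!__atomic_compare_exchange_n(&state, &old, new,
                                              false /* strong */,
                                              __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
        return new;
}

int main(void)
{
        printf("final state: %d\n", report_death());
        return 0;
}

With the older atomic_cmpxchg()-style pattern, the loop would re-read the variable on every iteration and compare the returned value against "old"; the boolean-returning form removes both steps, which on x86 lets the result of CMPXCHG be consumed directly from the ZF flag.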
Diffstat (limited to 'kernel/smpboot.c')
-rw-r--r--  kernel/smpboot.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index b9f54544e749..2c7396da470c 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -433,7 +433,7 @@ bool cpu_wait_death(unsigned int cpu, int seconds)
 
 	/* The outgoing CPU will normally get done quite quickly. */
 	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
-		goto update_state;
+		goto update_state_early;
 	udelay(5);
 
 	/* But if the outgoing CPU dawdles, wait increasingly long times. */
@@ -444,16 +444,17 @@ bool cpu_wait_death(unsigned int cpu, int seconds)
 			break;
 		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
 	}
-update_state:
+update_state_early:
 	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+update_state:
 	if (oldstate == CPU_DEAD) {
 		/* Outgoing CPU died normally, update state. */
 		smp_mb(); /* atomic_read() before update. */
 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
 	} else {
 		/* Outgoing CPU still hasn't died, set state accordingly. */
-		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
-				   oldstate, CPU_BROKEN) != oldstate)
+		if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+					&oldstate, CPU_BROKEN))
 			goto update_state;
 		ret = false;
 	}
@@ -475,14 +476,14 @@ bool cpu_report_death(void)
 	int newstate;
 	int cpu = smp_processor_id();
 
+	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
 	do {
-		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
 		if (oldstate != CPU_BROKEN)
 			newstate = CPU_DEAD;
 		else
 			newstate = CPU_DEAD_FROZEN;
-	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
-				oldstate, newstate) != oldstate);
+	} while (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+				     &oldstate, newstate));
 	return newstate == CPU_DEAD;
 }