author	Davidlohr Bueso <dave@stgolabs>	2016-03-09 17:55:36 -0800
committer	Ingo Molnar <mingo@kernel.org>	2016-03-10 10:28:35 +0100
commit	38460a2178d225b39ade5ac66586c3733391cf86 (patch)
tree	189a70cb0386f09af432c2580b03c404bc6e24b3 /kernel/smp.c
parent	90d1098478fb08a1ef166fe91622d8046869e17b (diff)
locking/csd_lock: Use smp_cond_acquire() in csd_lock_wait()
We can micro-optimize this call and mildly relax the barrier requirements by relying on ctrl + rmb, keeping the acquire semantics. In addition, this is now pretty much the standard approach for busy-waiting under such constraints.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Link: http://lkml.kernel.org/r/1457574936-19065-3-git-send-email-dbueso@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
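For reference (not part of this diff), smp_cond_acquire() as it existed in include/linux/compiler.h around the time of this commit was roughly the sketch below: the spin loop's control dependency orders the flag load before subsequent stores, and the trailing smp_rmb() orders it before subsequent loads, which together amount to ACQUIRE ordering without a full smp_load_acquire() on every iteration.

#define smp_cond_acquire(cond)	do {		\
	while (!(cond))				\
		cpu_relax();			\
	smp_rmb(); /* ctrl + rmb := acquire */	\
} while (0)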
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--	kernel/smp.c	3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 5099db15c5fb..300d29391e07 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -107,8 +107,7 @@ void __init call_function_init(void)
*/
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
- while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK)
- cpu_relax();
+ smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK));
}
static __always_inline void csd_lock(struct call_single_data *csd)
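For context, the acquire in csd_lock_wait() pairs with the release in csd_unlock() elsewhere in kernel/smp.c; around this commit the release side looked roughly like the sketch below (shown for illustration, it is not part of this diff). The smp_store_release() ensures all accesses to the csd data are complete before the CSD_FLAG_LOCK bit is observed as cleared by a waiter.

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}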