author    Christopher M. Riedl <cmr@informatik.wtf>    2019-08-12 22:13:12 -0500
committer Michael Ellerman <mpe@ellerman.id.au>        2019-08-27 13:03:36 +1000
commit    d57b78353a99f5d813248954dd7d0527a01751ac (patch)
tree      70177d1eaa526076ac2e87bac05a5e01772869e5 /arch/powerpc/include/asm/spinlock.h
parent    d7fb5b18a540efaf05da2b980fc11d50ba775677 (diff)
powerpc/spinlocks: Refactor SHARED_PROCESSOR
Determining if a processor is in shared processor mode is not a constant, so don't hide it behind a #define.

Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190813031314.1828-2-cmr@informatik.wtf
Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 24
1 file changed, 18 insertions, 6 deletions
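The gist of the change, as a minimal standalone sketch (the helper name matches the patch; struct lppaca, local_paca, and lppaca_shared_proc() are stubbed here purely for illustration): an object-like macro that expands to a runtime query reads like a compile-time constant at its call sites, whereas a static inline function makes the runtime nature explicit and type-checks its uses.

#include <stdbool.h>

/* Stubs standing in for the kernel's PACA machinery -- illustrative only. */
struct lppaca { bool shared_proc; };
struct paca_struct { struct lppaca *lppaca_ptr; };
extern struct paca_struct *local_paca;
static inline bool lppaca_shared_proc(struct lppaca *l) { return l->shared_proc; }

/* Before: looks like a constant, but evaluates runtime state on every use. */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))

/* After: the runtime query is explicit, and callers read as function calls. */
static inline bool is_shared_processor(void)
{
	return lppaca_shared_proc(local_paca->lppaca_ptr);
}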
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index a47f827bc5f1..e9c60fbcc8fe 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -101,15 +101,27 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
#endif
+static inline bool is_shared_processor(void)
+{
+/*
+ * LPPACA is only available on Pseries so guard anything LPPACA-related to
+ * allow other platforms (which include this common header) to compile.
+ */
+#ifdef CONFIG_PPC_PSERIES
+ return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
+ lppaca_shared_proc(local_paca->lppaca_ptr));
+#else
+ return false;
+#endif
+}
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
while (1) {
@@ -117,7 +129,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
break;
do {
HMT_low();
- if (SHARED_PROCESSOR)
+ if (is_shared_processor())
__spin_yield(lock);
} while (unlikely(lock->slock != 0));
HMT_medium();
@@ -136,7 +148,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
local_irq_restore(flags);
do {
HMT_low();
- if (SHARED_PROCESSOR)
+ if (is_shared_processor())
__spin_yield(lock);
} while (unlikely(lock->slock != 0));
HMT_medium();
@@ -226,7 +238,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
break;
do {
HMT_low();
- if (SHARED_PROCESSOR)
+ if (is_shared_processor())
__rw_yield(rw);
} while (unlikely(rw->lock < 0));
HMT_medium();
@@ -240,7 +252,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
break;
do {
HMT_low();
- if (SHARED_PROCESSOR)
+ if (is_shared_processor())
__rw_yield(rw);
} while (unlikely(rw->lock != 0));
HMT_medium();
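One detail worth noting in the new helper: it combines an #ifdef guard (so non-pseries builds that include this common header never reference lppaca at all) with IS_ENABLED(), which folds to a 0/1 constant so the && short-circuits away at compile time on SPLPAR=n pseries builds. Below is a minimal userspace sketch of that folding, using a simplified copy of the kernel's <linux/kconfig.h> trick (the real IS_ENABLED() also checks the _MODULE variant; the config toggle and stub here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Simplified IS_ENABLED(): expands to 1 when the option macro is defined
 * to 1, and to 0 otherwise, entirely in the preprocessor. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_PPC_SPLPAR 1	/* toggle: comment out to see the 0 branch */

/* Stub for lppaca_shared_proc(local_paca->lppaca_ptr) -- illustrative only. */
static bool shared_proc_stub(void) { return true; }

static inline bool is_shared_processor(void)
{
	/* With the option off, IS_ENABLED() folds to 0 and the compiler can
	 * drop the stub call; the expression still type-checks either way. */
	return IS_ENABLED(CONFIG_PPC_SPLPAR) && shared_proc_stub();
}

int main(void)
{
	printf("shared processor: %d\n", is_shared_processor());
	return 0;
}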