Diffstat (limited to 'arch/loongarch/include/asm/qspinlock.h')
-rw-r--r--  arch/loongarch/include/asm/qspinlock.h  41
1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
index 34f43f8ad591..e76d3aa1e1eb 100644
--- a/arch/loongarch/include/asm/qspinlock.h
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -1,18 +1,41 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_QSPINLOCK_H
-#define _ASM_QSPINLOCK_H
+#ifndef _ASM_LOONGARCH_QSPINLOCK_H
+#define _ASM_LOONGARCH_QSPINLOCK_H
 
-#include <asm-generic/qspinlock_types.h>
+#include <linux/jump_label.h>
 
-#define queued_spin_unlock queued_spin_unlock
+#ifdef CONFIG_PARAVIRT
 
-static inline void queued_spin_unlock(struct qspinlock *lock)
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+#define virt_spin_lock virt_spin_lock
+
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
-	compiletime_assert_atomic_type(lock->locked);
-	c_sync();
-	WRITE_ONCE(lock->locked, 0);
+	int val;
+
+	if (!static_branch_unlikely(&virt_spin_lock_key))
+		return false;
+
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+__retry:
+	val = atomic_read(&lock->val);
+
+	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+		cpu_relax();
+		goto __retry;
+	}
+
+	return true;
 }
 
+#endif /* CONFIG_PARAVIRT */
+
 #include <asm-generic/qspinlock.h>
 
-#endif /* _ASM_QSPINLOCK_H */
+#endif // _ASM_LOONGARCH_QSPINLOCK_H
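
Note on how this hook is used: the generic queued-spinlock slowpath in kernel/locking/qspinlock.c calls virt_spin_lock() before queuing and returns early when it succeeds, so a guest with virt_spin_lock_key enabled degrades from a fair queued lock to a simple unfair Test-and-Set lock. The sketch below restates the __retry loop above as a self-contained userspace C11 fragment; the tas_* names are hypothetical illustration names, not kernel interfaces, and the C11 atomics stand in for the kernel's atomic_read()/atomic_try_cmpxchg().

/*
 * Userspace sketch of the Test-and-Set fallback in virt_spin_lock()
 * above. Like the kernel loop, it reads the lock word first and only
 * attempts the compare-and-swap when the lock looks free, which keeps
 * the cacheline in a shared state while spinning.
 */
#include <stdatomic.h>

struct tas_lock {
	atomic_int val;		/* 0 = unlocked, 1 = locked */
};

static void tas_lock_acquire(struct tas_lock *lock)
{
	int val;

retry:
	val = atomic_load_explicit(&lock->val, memory_order_relaxed);

	if (val || !atomic_compare_exchange_strong_explicit(&lock->val, &val, 1,
							    memory_order_acquire,
							    memory_order_relaxed)) {
		/* cpu_relax() analogue would go here */
		goto retry;
	}
}

static void tas_lock_release(struct tas_lock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}

The unfair lock is the right trade-off under virtualization, as the comment in the patch says: with a fair queued lock, a vCPU preempted while holding the lock (or while next in the queue) stalls every waiter behind it, whereas a Test-and-Set lock lets whichever vCPU is currently scheduled take the lock immediately.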