Diffstat (limited to 'arch/x86/kernel/paravirt-spinlocks.c')
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c  23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 8f2d1c9d43a8..9e1ea99ad9df 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
@@ -16,7 +17,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 
 bool pv_is_native_spin_unlock(void)
 {
-	return pv_lock_ops.queued_spin_unlock.func ==
+	return pv_ops.lock.queued_spin_unlock.func ==
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
@@ -28,17 +29,15 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 
 bool pv_is_native_vcpu_is_preempted(void)
 {
-	return pv_lock_ops.vcpu_is_preempted.func ==
+	return pv_ops.lock.vcpu_is_preempted.func ==
 		__raw_callee_save___native_vcpu_is_preempted;
 }
 
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
-	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
-	.wait = paravirt_nop,
-	.kick = paravirt_nop,
-	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-};
-EXPORT_SYMBOL(pv_lock_ops);
+void __init paravirt_set_cap(void)
+{
+	if (!pv_is_native_spin_unlock())
+		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+	if (!pv_is_native_vcpu_is_preempted())
+		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
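
The change above removes the exported pv_lock_ops structure in favor of the lock member of the consolidated pv_ops table, and the new paravirt_set_cap() forces the X86_FEATURE_PVUNLOCK and X86_FEATURE_VCPUPREEMPT synthetic CPU features whenever the queued_spin_unlock or vcpu_is_preempted slot no longer points at the native callee-save thunk, i.e. whenever a hypervisor has installed its own callback. Below is a minimal, self-contained userspace sketch of that pattern; the names (lock_ops, unlock_is_native, CAP_PV_UNLOCK, set_cap) are hypothetical stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for pv_ops.lock and the forced-capability
 * bits; none of these names exist in the kernel. */
static unsigned long forced_caps;
#define CAP_PV_UNLOCK (1UL << 0)

static void native_unlock(void) { }	/* native fast path */
static void hv_unlock(void)     { }	/* hypervisor override */

struct lock_ops { void (*unlock)(void); };
static struct lock_ops ops = { .unlock = native_unlock };

/* Like pv_is_native_spin_unlock(): pointer-compare against native. */
static bool unlock_is_native(void)
{
	return ops.unlock == native_unlock;
}

/* Like paravirt_set_cap(): latch the capability only when overridden. */
static void set_cap(void)
{
	if (!unlock_is_native())
		forced_caps |= CAP_PV_UNLOCK;
}

int main(void)
{
	set_cap();			/* still native: bit stays clear */
	printf("native:     cap=%lu\n", forced_caps & CAP_PV_UNLOCK);

	ops.unlock = hv_unlock;		/* hypervisor installs its own unlock */
	set_cap();
	printf("overridden: cap=%lu\n", forced_caps & CAP_PV_UNLOCK);
	return 0;
}

Latching the decision into a feature bit once, early in boot, lets later code test a cheap capability flag instead of repeating the function-pointer comparison at every call site.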