Diffstat (limited to 'arch/x86/include/asm/kvm_para.h')
-rw-r--r--  arch/x86/include/asm/kvm_para.h  109
1 file changed, 75 insertions(+), 34 deletions(-)
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 695399f2d5eb..4a47c16e2df8 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -1,11 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_X86_KVM_PARA_H
 #define _ASM_X86_KVM_PARA_H
 
 #include <asm/processor.h>
+#include <asm/alternative.h>
+#include <linux/interrupt.h>
 #include <uapi/asm/kvm_para.h>
 
-extern void kvmclock_init(void);
-extern int kvm_register_clock(char *txt);
+#include <asm/tdx.h>
 
 #ifdef CONFIG_KVM_GUEST
 bool kvm_check_and_clear_guest_paused(void);
@@ -16,10 +18,8 @@ static inline bool kvm_check_and_clear_guest_paused(void)
 }
 #endif /* CONFIG_KVM_GUEST */
 
-/* This instruction is vmcall.  On non-VT architectures, it will generate a
- * trap that we will then rewrite to the appropriate instruction.
- */
-#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
+#define KVM_HYPERCALL \
+        ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)
 
 /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
  * instruction.  The hypervisor may replace it with something else but only the
@@ -34,6 +34,10 @@ static inline bool kvm_check_and_clear_guest_paused(void)
 static inline long kvm_hypercall0(unsigned int nr)
 {
         long ret;
+
+        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+                return tdx_kvm_hypercall(nr, 0, 0, 0, 0);
+
         asm volatile(KVM_HYPERCALL
                      : "=a"(ret)
                      : "a"(nr)
@@ -44,6 +48,10 @@ static inline long kvm_hypercall0(unsigned int nr)
 static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 {
         long ret;
+
+        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+                return tdx_kvm_hypercall(nr, p1, 0, 0, 0);
+
         asm volatile(KVM_HYPERCALL
                      : "=a"(ret)
                      : "a"(nr), "b"(p1)
@@ -55,6 +63,10 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
                                   unsigned long p2)
 {
         long ret;
+
+        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+                return tdx_kvm_hypercall(nr, p1, p2, 0, 0);
+
         asm volatile(KVM_HYPERCALL
                      : "=a"(ret)
                      : "a"(nr), "b"(p1), "c"(p2)
@@ -66,6 +78,10 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
                                   unsigned long p2, unsigned long p3)
 {
         long ret;
+
+        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+                return tdx_kvm_hypercall(nr, p1, p2, p3, 0);
+
         asm volatile(KVM_HYPERCALL
                      : "=a"(ret)
                      : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
@@ -78,6 +94,10 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
                                   unsigned long p4)
 {
         long ret;
+
+        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+                return tdx_kvm_hypercall(nr, p1, p2, p3, p4);
+
         asm volatile(KVM_HYPERCALL
                      : "=a"(ret)
                      : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
@@ -85,51 +105,72 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
         return ret;
 }
 
-static inline bool kvm_para_available(void)
+static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
+                                      unsigned long p2, unsigned long p3)
 {
-        unsigned int eax, ebx, ecx, edx;
-        char signature[13];
+        long ret;
+
+        asm volatile("vmmcall"
+                     : "=a"(ret)
+                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+                     : "memory");
+        return ret;
+}
 
-        if (boot_cpu_data.cpuid_level < 0)
-                return false;   /* So we don't blow up on old processors */
+#ifdef CONFIG_KVM_GUEST
+void kvmclock_init(void);
+void kvmclock_disable(void);
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
+unsigned int kvm_arch_para_hints(void);
+void kvm_async_pf_task_wait_schedule(u32 token);
+u32 kvm_read_and_reset_apf_flags(void);
+bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
+
+DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+
+static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+{
+        if (static_branch_unlikely(&kvm_async_pf_enabled))
+                return __kvm_handle_async_pf(regs, token);
+        else
+                return false;
+}
 
-        if (cpu_has_hypervisor) {
-                cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
-                memcpy(signature + 0, &ebx, 4);
-                memcpy(signature + 4, &ecx, 4);
-                memcpy(signature + 8, &edx, 4);
-                signature[12] = 0;
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init kvm_spinlock_init(void);
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
-                if (strcmp(signature, "KVMKVMKVM") == 0)
-                        return true;
-        }
+#else /* CONFIG_KVM_GUEST */
+#define kvm_async_pf_task_wait_schedule(T) do {} while(0)
 
+static inline bool kvm_para_available(void)
+{
         return false;
 }
 
 static inline unsigned int kvm_arch_para_features(void)
 {
-        return cpuid_eax(KVM_CPUID_FEATURES);
+        return 0;
 }
 
-#ifdef CONFIG_KVM_GUEST
-void __init kvm_guest_init(void);
-void kvm_async_pf_task_wait(u32 token);
-void kvm_async_pf_task_wake(u32 token);
-u32 kvm_read_and_reset_pf_reason(void);
-extern void kvm_disable_steal_time(void);
-#else
-#define kvm_guest_init() do { } while (0)
-#define kvm_async_pf_task_wait(T) do {} while(0)
-#define kvm_async_pf_task_wake(T) do {} while(0)
-static inline u32 kvm_read_and_reset_pf_reason(void)
+static inline unsigned int kvm_arch_para_hints(void)
+{
+        return 0;
+}
+
+static inline u32 kvm_read_and_reset_apf_flags(void)
 {
         return 0;
 }
 
-static inline void kvm_disable_steal_time(void)
+static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
-        return;
+        return false;
 }
 #endif
 
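For context, a short sketch of how a guest consumes the kvm_hypercallN() helpers above: the PV-unhalt path in arch/x86/kernel/kvm.c kicks a halted vCPU with kvm_hypercall2(), placing the hypercall number in RAX and the two arguments in RBX/RCX, as documented in the header comment. Only kvm_hypercall2(), KVM_HC_KICK_CPU, and x86_cpu_to_apicid are real kernel interfaces here; the example_* function is hypothetical scaffolding, not part of this patch.

/*
 * Hypothetical example, modeled on kvm_kick_cpu() in
 * arch/x86/kernel/kvm.c.
 */
#include <linux/kvm_para.h>     /* kvm_hypercall2(), KVM_HC_KICK_CPU */
#include <linux/percpu.h>
#include <asm/smp.h>            /* x86_cpu_to_apicid per-CPU variable */

static void example_kick_vcpu(int cpu)
{
        unsigned long flags = 0;        /* reserved by the ABI, must be 0 */
        int apicid = per_cpu(x86_cpu_to_apicid, cpu);

        /* nr -> RAX, flags -> RBX, apicid -> RCX; status returns in RAX. */
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

Note that on a TDX guest the same call transparently takes the tdx_kvm_hypercall() branch added by this patch instead of executing KVM_HYPERCALL directly.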

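The async page-fault plumbing above gates __kvm_handle_async_pf() behind a static key, so when the feature is off the exception path pays only a patched NOP rather than a load and compare. A minimal, self-contained sketch of that jump-label pattern follows; the jump_label API calls are the real kernel ones, while every example_* name is hypothetical:

#include <linux/jump_label.h>
#include <linux/types.h>

/* Key starts false: the branch below initially compiles to a NOP. */
DEFINE_STATIC_KEY_FALSE(example_async_enabled);

/* Hot path: no memory load, just a patchable jump site. */
static bool example_handle_event(u32 token)
{
        if (static_branch_unlikely(&example_async_enabled))
                return true;    /* real code would consume 'token' here */
        return false;
}

/* Setup path: flipping the key live-patches every branch site. */
static void example_enable(void)
{
        static_branch_enable(&example_async_enabled);
}

This mirrors how kvm_handle_async_pf() above stays effectively free until KVM guest support flips kvm_async_pf_enabled at boot.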