Diffstat (limited to 'arch/x86/kvm/x86.h')
-rw-r--r--   arch/x86/kvm/x86.h   16
1 file changed, 14 insertions, 2 deletions
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f3dc77f006f9..fdab0ad49098 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -420,6 +420,20 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
 	return !(kvm->arch.disabled_quirks & quirk);
 }
 
+static __always_inline void kvm_request_l1tf_flush_l1d(void)
+{
+#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
+	/*
+	 * Use a raw write to set the per-CPU flag, as KVM will ensure a flush
+	 * even if preemption is currently enabled.  If the current vCPU task
+	 * is migrated to a different CPU (or userspace runs the vCPU on a
+	 * different task) before the next VM-Entry, then kvm_arch_vcpu_load()
+	 * will request a flush on the new CPU.
+	 */
+	raw_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
+#endif
+}
+
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
 u64 get_kvmclock_ns(struct kvm *kvm);
@@ -622,8 +636,6 @@ static inline void kvm_machine_check(void)
 #endif
 }
 
-void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
-void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 			      struct x86_exception *e);
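
The reasoning in the new comment can be illustrated with a small standalone sketch. This is plain C, not kernel code; request_l1tf_flush(), vcpu_load(), vm_entry() and l1tf_flush_pending[] are made-up stand-ins for kvm_request_l1tf_flush_l1d(), kvm_arch_vcpu_load(), the VM-Entry flush check and irq_stat.kvm_cpu_l1tf_flush_l1d. The point it models: a flush request is just a plain write to the current CPU's flag, and a vCPU that migrates before the next VM-Entry still gets flushed because the load path requests a flush again on the new CPU.

/*
 * Standalone model (hypothetical names) of the pattern added above:
 * requesting an L1D flush is a plain, non-atomic write to the current
 * CPU's flag, and migration is covered by re-requesting the flush when
 * the vCPU is loaded on its new CPU.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Models the per-CPU irq_stat.kvm_cpu_l1tf_flush_l1d flag. */
static bool l1tf_flush_pending[NR_CPUS];

struct vcpu {
	int cpu;                /* CPU the vCPU was last loaded on */
	bool l1tf_flush_l1d;    /* models vcpu->arch.l1tf_flush_l1d */
};

/* Models kvm_request_l1tf_flush_l1d(): a plain write, no atomics. */
static void request_l1tf_flush(int this_cpu)
{
	l1tf_flush_pending[this_cpu] = true;
}

/* Models the kvm_arch_vcpu_load() side: (re-)request a flush on load. */
static void vcpu_load(struct vcpu *v, int cpu)
{
	v->l1tf_flush_l1d = true;
	v->cpu = cpu;
}

/* Models the VM-Entry path: consume and clear both flags, then flush. */
static void vm_entry(struct vcpu *v)
{
	if (v->l1tf_flush_l1d || l1tf_flush_pending[v->cpu]) {
		v->l1tf_flush_l1d = false;
		l1tf_flush_pending[v->cpu] = false;
		printf("CPU%d: flushing L1D before VM-Entry\n", v->cpu);
	}
}

int main(void)
{
	struct vcpu v = { .cpu = 0, .l1tf_flush_l1d = false };

	request_l1tf_flush(v.cpu);  /* e.g. after handling a sensitive exit */
	vcpu_load(&v, 2);           /* vCPU task migrated before VM-Entry */
	vm_entry(&v);               /* flush still happens, now on CPU 2 */
	return 0;
}

In this model a request recorded on the old CPU before a migration is not lost, because the load path requests a flush again on the new CPU, exactly as the comment describes; a stale flag left on the old CPU at worst causes one extra flush there. That is why the raw, non-atomic write in the patch does not need to disable preemption.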
