Diffstat (limited to 'arch/x86/kvm/svm/svm.c')
-rw-r--r-- | arch/x86/kvm/svm/svm.c | 51
1 file changed, 14 insertions, 37 deletions
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9c707dd7349a..0827e0c25309 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -95,6 +95,7 @@ static const struct svm_direct_access_msrs {
 #endif
 	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
 	{ .index = MSR_IA32_PRED_CMD,			.always = false },
+	{ .index = MSR_IA32_FLUSH_CMD,			.always = false },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
 	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
@@ -2869,32 +2870,10 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
 	return 0;
 }
 
-static int svm_set_msr_ia32_cmd(struct kvm_vcpu *vcpu, struct msr_data *msr,
-				bool guest_has_feat, u64 cmd,
-				int x86_feature_bit)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	if (!msr->host_initiated && !guest_has_feat)
-		return 1;
-
-	if (!(msr->data & ~cmd))
-		return 1;
-	if (!boot_cpu_has(x86_feature_bit))
-		return 1;
-	if (!msr->data)
-		return 0;
-
-	wrmsrl(msr->index, cmd);
-	set_msr_interception(vcpu, svm->msrpm, msr->index, 0, 1);
-
-	return 0;
-}
-
 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	int r;
+	int ret = 0;
 	u32 ecx = msr->index;
 	u64 data = msr->data;
 
@@ -2964,16 +2943,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		 */
 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 		break;
-	case MSR_IA32_PRED_CMD:
-		r = svm_set_msr_ia32_cmd(vcpu, msr,
-					 guest_has_pred_cmd_msr(vcpu),
-					 PRED_CMD_IBPB, X86_FEATURE_IBPB);
-		break;
-	case MSR_IA32_FLUSH_CMD:
-		r = svm_set_msr_ia32_cmd(vcpu, msr,
-					 guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D),
-					 L1D_FLUSH, X86_FEATURE_FLUSH_L1D);
-		break;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
 		if (!msr->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
@@ -3026,10 +2995,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		 * guest via direct_access_msrs, and switch it via user return.
 		 */
 		preempt_disable();
-		r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
+		ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
 		preempt_enable();
-		if (r)
-			return 1;
+		if (ret)
+			break;
 
 		svm->tsc_aux = data;
 		break;
@@ -3087,7 +3056,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
-	return 0;
+	return ret;
 }
 
 static int msr_interception(struct kvm_vcpu *vcpu)
@@ -4168,6 +4137,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 	svm_recalc_instruction_intercepts(vcpu, svm);
 
+	if (boot_cpu_has(X86_FEATURE_IBPB))
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
+				     !!guest_has_pred_cmd_msr(vcpu));
+
+	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
+				     !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
+
 	/* For sev guests, the memory encryption bit is not reserved in CR3.  */
 	if (sev_guest(vcpu->kvm)) {
 		best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
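
Note: the net effect of this change is that svm_set_msr() no longer emulates MSR_IA32_PRED_CMD and MSR_IA32_FLUSH_CMD itself; writes that remain intercepted fall through the default case into kvm_set_msr_common(), and the decision to pass the write-only command MSRs straight through to the guest is made once per CPUID update in svm_vcpu_after_set_cpuid() rather than lazily on the first intercepted write. The `r` local, now used only by the TSC_AUX path, becomes `ret` initialized to 0 so the function can funnel through a single `return ret`. Below is a minimal sketch of the recalculation, not verbatim kernel code; the helper name is hypothetical and set_msr_interception()'s read/write arguments are assumed to mean "allow direct guest access", which is how the new hunk uses them.

/*
 * Illustrative sketch only: recompute passthrough for the write-only
 * "command" MSRs whenever guest CPUID changes, instead of opening it
 * lazily on the first intercepted write as the removed
 * svm_set_msr_ia32_cmd() helper did.
 */
static void sketch_recalc_cmd_msr_passthrough(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Reads stay intercepted (both MSRs are write-only); writes are
	 * passed through only when the host supports the feature and the
	 * guest's CPUID advertises it.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
				     !!guest_has_pred_cmd_msr(vcpu));

	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
				     !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
}

Keeping the MSR bitmap in sync with CPUID here also lets the deleted per-MSR reserved-bit checks live outside SVM code: any write that is still intercepted is simply routed to kvm_set_msr_common() by the default case of svm_set_msr().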