From ea55d5a2cf7c507a9ac03b41716bf1877edad153 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 10 Jul 2023 19:31:37 +0000 Subject: KVM: arm64: Delete pointless switch statement in kvm_reset_vcpu() The vCPU target hasn't mattered for quite a long time now. Delete the useless switch statement in kvm_reset_vcpu(), which hilariously only had a default case in it. Reviewed-by: Zenghui Yu Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230710193140.1706399-2-oliver.upton@linux.dev --- arch/arm64/kvm/reset.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index bc8556b6f459..7a65a35ee4ac 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -248,21 +248,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } } - switch (vcpu->arch.target) { - default: - if (vcpu_el1_is_32bit(vcpu)) { - pstate = VCPU_RESET_PSTATE_SVC; - } else if (vcpu_has_nv(vcpu)) { - pstate = VCPU_RESET_PSTATE_EL2; - } else { - pstate = VCPU_RESET_PSTATE_EL1; - } - - if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) { - ret = -EINVAL; - goto out; - } - break; + if (vcpu_el1_is_32bit(vcpu)) + pstate = VCPU_RESET_PSTATE_SVC; + else if (vcpu_has_nv(vcpu)) + pstate = VCPU_RESET_PSTATE_EL2; + else + pstate = VCPU_RESET_PSTATE_EL1; + + if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) { + ret = -EINVAL; + goto out; } /* Reset core registers */ -- cgit From c8a67729b8a36a5f4857de645ee9808fc99d8618 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 10 Jul 2023 19:31:38 +0000 Subject: KVM: arm64: Remove pointless check for changed init target At any time there is only a single valid value for KVM_ARM_VCPU_INIT, depending on the current CPU implementation. In all likelihood, this will be the generic ARMv8 target. Drop the pointless check for a changed target value between calls to KVM_ARM_VCPU_INIT and instead rely on the check against kvm_target_cpu(). Signed-off-by: Oliver Upton Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230710193140.1706399-3-oliver.upton@linux.dev --- arch/arm64/kvm/arm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index c2c14059f6a8..3f844934b9f3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1212,8 +1212,7 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu, { unsigned long features = init->features[0]; - return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) || - vcpu->arch.target != init->target; + return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); } static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, -- cgit From ef98406036769107d5c49a519b31c940910b98d3 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 10 Jul 2023 19:31:39 +0000 Subject: KVM: arm64: Replace vCPU target with a configuration flag The value of kvm_vcpu_arch::target has been used to determine if a vCPU has actually been initialized. Storing this as an integer is needless at this point, as KVM doesn't do any microarch-specific emulation in the first place. Instead, all we care about is whether or not the vCPU has been initialized. Delete the field in favor of a vCPU configuration flag indicating if KVM_ARM_VCPU_INIT has completed for the vCPU. 
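For illustration only (not part of the patch), a minimal standalone C sketch of the pattern being adopted: a named flag bit replacing a sentinel integer. The toy struct and the bit position are invented here, not KVM's real __vcpu_single_flag() machinery:

  #include <stdbool.h>
  #include <stdio.h>

  #define VCPU_INITIALIZED (1u << 3)      /* hypothetical bit position */

  struct toy_vcpu {
          unsigned int cflags;            /* stands in for the cflags word */
  };

  static bool vcpu_initialized(const struct toy_vcpu *v)
  {
          /* replaces the old "target >= 0" sentinel check */
          return v->cflags & VCPU_INITIALIZED;
  }

  int main(void)
  {
          struct toy_vcpu v = { .cflags = 0 };

          printf("%d\n", vcpu_initialized(&v));  /* 0: init not yet done */
          v.cflags |= VCPU_INITIALIZED;          /* KVM_ARM_VCPU_INIT completed */
          printf("%d\n", vcpu_initialized(&v));  /* 1 */
          v.cflags &= ~VCPU_INITIALIZED;         /* cleared again on a failed run */
          return 0;
  }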
Reviewed-by: Zenghui Yu Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230710193140.1706399-4-oliver.upton@linux.dev --- arch/arm64/include/asm/kvm_host.h | 5 +++-- arch/arm64/kvm/arm.c | 12 +++++------- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8b6096753740..b53105bccff9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -567,8 +567,7 @@ struct kvm_vcpu_arch { /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; - /* Target CPU and feature flags */ - int target; + /* feature flags */ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ @@ -669,6 +668,8 @@ struct kvm_vcpu_arch { #define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1)) /* PTRAUTH exposed to guest */ #define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2)) +/* KVM_ARM_VCPU_INIT completed */ +#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3)) /* Exception pending */ #define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0)) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3f844934b9f3..13f1bde0bc2d 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -360,7 +360,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) #endif /* Force users to call KVM_ARM_VCPU_INIT */ - vcpu->arch.target = -1; + vcpu_clear_flag(vcpu, VCPU_INITIALIZED); bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; @@ -569,7 +569,7 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) { - return vcpu->arch.target >= 0; + return vcpu_get_flag(vcpu, VCPU_INITIALIZED); } /* @@ -1051,7 +1051,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * invalid. The VMM can try and fix it by issuing a * KVM_ARM_VCPU_INIT if it really wants to. */ - vcpu->arch.target = -1; + vcpu_clear_flag(vcpu, VCPU_INITIALIZED); ret = ARM_EXCEPTION_IL; } @@ -1228,20 +1228,18 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES)) goto out_unlock; - vcpu->arch.target = init->target; bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); /* Now we know what it is, we can reset it. */ ret = kvm_reset_vcpu(vcpu); if (ret) { - vcpu->arch.target = -1; bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); goto out_unlock; } bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); - + vcpu_set_flag(vcpu, VCPU_INITIALIZED); out_unlock: mutex_unlock(&kvm->arch.config_lock); return ret; @@ -1259,7 +1257,7 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, if (ret) return ret; - if (vcpu->arch.target == -1) + if (!kvm_vcpu_initialized(vcpu)) return __kvm_vcpu_set_target(vcpu, init); if (kvm_vcpu_init_changed(vcpu, init)) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 0a6271052def..b9caac3e7b1d 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -236,7 +236,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) * KVM_ARM_VCPU_INIT, however, this is likely not possible for * protected VMs. 
*/ - vcpu->arch.target = -1; + vcpu_clear_flag(vcpu, VCPU_INITIALIZED); *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT); *exit_code |= ARM_EXCEPTION_IL; } -- cgit From 5346f7e13e5eb134920a14504b6900c6168dd16e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 10 Jul 2023 19:31:40 +0000 Subject: KVM: arm64: Always return generic v8 as the preferred target Userspace selecting an implementation-specific vCPU target has been completely useless for a very long time. Let's go whole hog and start returning the generic v8 target across all implementations as the preferred target. Uphold the pre-existing behavior by tolerating either the generic target or an implementation-specific target if the vCPU happens to be running on one of the lucky few parts. Acked-by: Zenghui Yu Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230710193140.1706399-5-oliver.upton@linux.dev --- arch/arm64/include/asm/kvm_host.h | 1 - arch/arm64/kvm/arm.c | 9 +++++---- arch/arm64/kvm/guest.c | 15 --------------- 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b53105bccff9..ed60277e834a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -898,7 +898,6 @@ struct kvm_vcpu_stat { u64 exits; }; -void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 13f1bde0bc2d..9ab17ecd76bb 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1250,7 +1250,8 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, { int ret; - if (init->target != kvm_target_cpu()) + if (init->target != KVM_ARM_TARGET_GENERIC_V8 && + init->target != kvm_target_cpu()) return -EINVAL; ret = kvm_vcpu_init_check_features(vcpu, init); @@ -1585,9 +1586,9 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); } case KVM_ARM_PREFERRED_TARGET: { - struct kvm_vcpu_init init; - - kvm_vcpu_preferred_target(&init); + struct kvm_vcpu_init init = { + .target = KVM_ARM_TARGET_GENERIC_V8, + }; if (copy_to_user(argp, &init, sizeof(init))) return -EFAULT; diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 20280a5233f6..95f6945c4432 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -884,21 +884,6 @@ u32 __attribute_const__ kvm_target_cpu(void) return KVM_ARM_TARGET_GENERIC_V8; } -void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init) -{ - u32 target = kvm_target_cpu(); - - memset(init, 0, sizeof(*init)); - - /* - * For now, we don't return any features. - * In future, we might use features to return target - * specific features available for the preferred - * target type. - */ - init->target = (__u32)target; -} - int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; -- cgit From 1ba11daef0a9b062e40b5393d285c82ab6483730 Mon Sep 17 00:00:00 2001 From: Shaoqin Huang Date: Thu, 27 Jul 2023 05:07:54 -0400 Subject: KVM: arm64: Use the known cpu id instead of smp_processor_id() In kvm_arch_vcpu_load(), it has the parameter cpu which is the value of smp_processor_id(), so no need to get it again. Simply replace it. 
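As a hedged, standalone sketch of the redundancy being removed (stubbed smp_processor_id() and toy types, not kernel code): the caller already derives the CPU id once, so the callee should simply use the parameter it was handed.

  #include <stdio.h>

  static int smp_processor_id(void) { return 3; }  /* stub for illustration */

  struct toy_vcpu { int loaded_cpu; };

  /* The caller obtains 'cpu' from smp_processor_id() while preemption is
   * disabled, so querying it again inside the callee is pure overhead. */
  static void toy_vcpu_load(struct toy_vcpu *v, int cpu)
  {
          v->loaded_cpu = cpu;            /* use the value we were handed */
  }

  int main(void)
  {
          struct toy_vcpu v;

          toy_vcpu_load(&v, smp_processor_id());
          printf("loaded on CPU %d\n", v.loaded_cpu);
          return 0;
  }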
Signed-off-by: Shaoqin Huang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230727090754.1900310-1-shahuang@redhat.com --- arch/arm64/kvm/arm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 72dc53a75d1c..3c015bdd35ee 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -462,7 +462,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu_ptrauth_disable(vcpu); kvm_arch_vcpu_load_debug_state_flags(vcpu); - if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus)) + if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) vcpu_set_on_unsupported_cpu(vcpu); } -- cgit From e21f3905f98ff1f72b06614440c2be93fda58b44 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Mon, 24 Jul 2023 22:22:57 +0800 Subject: KVM: arm64: Drop HCR_VIRT_EXCP_MASK This was introduced in commit 0369f6a34b9f ("arm64: KVM: EL2 register definitions") and for more than 10 years nobody has used it. Remove it. Signed-off-by: Zenghui Yu Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230724142257.1551-1-yuzenghui@huawei.com --- arch/arm64/include/asm/kvm_arm.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 58e5eb27da68..d04ef89ca6a0 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -89,7 +89,6 @@ HCR_BSU_IS | HCR_FB | HCR_TACR | \ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3) -#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA) #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) -- cgit From b265ee7bae1192ecc55ecaf7e63c8ed84cc2f509 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 15 Jun 2023 16:37:49 +1000 Subject: KVM: SEV: move set_dr_intercepts/clr_dr_intercepts from the header Static functions set_dr_intercepts() and clr_dr_intercepts() are only called from SVM code, so move them from the header into svm.c. No functional change intended. 
Signed-off-by: Alexey Kardashevskiy Reviewed-by: Carlos Bilbao Reviewed-by: Tom Lendacky Reviewed-by: Santosh Shukla Link: https://lore.kernel.org/r/20230615063757.3039121-2-aik@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.h | 42 ------------------------------------------ 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d381ad424554..42f17ae99c9e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -677,6 +677,48 @@ free_save_area: } +static void set_dr_intercepts(struct vcpu_svm *svm) +{ + struct vmcb *vmcb = svm->vmcb01.ptr; + + if (!sev_es_guest(svm->vcpu.kvm)) { + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE); + } + + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); + + recalc_intercepts(svm); +} + +static void clr_dr_intercepts(struct vcpu_svm *svm) +{ + struct vmcb *vmcb = svm->vmcb01.ptr; + + vmcb->control.intercepts[INTERCEPT_DR] = 0; + + /* DR7 access must remain intercepted for an SEV-ES guest */ + if (sev_es_guest(svm->vcpu.kvm)) { + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); + } + + recalc_intercepts(svm); +} + static int direct_access_msr_slot(u32 msr) { u32 i; diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 18af7e712a5a..800ca1776b59 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -404,48 +404,6 @@ static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u3 return test_bit(bit, (unsigned long *)&control->intercepts); } -static inline void set_dr_intercepts(struct vcpu_svm *svm) -{ - struct vmcb *vmcb = svm->vmcb01.ptr; - - if (!sev_es_guest(svm->vcpu.kvm)) { - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE); - } - - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); - 
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); - - recalc_intercepts(svm); -} - -static inline void clr_dr_intercepts(struct vcpu_svm *svm) -{ - struct vmcb *vmcb = svm->vmcb01.ptr; - - vmcb->control.intercepts[INTERCEPT_DR] = 0; - - /* DR7 access must remain intercepted for an SEV-ES guest */ - if (sev_es_guest(svm->vcpu.kvm)) { - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); - } - - recalc_intercepts(svm); -} - static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit) { struct vmcb *vmcb = svm->vmcb01.ptr; -- cgit From 29de732cc95cb5219d8c2bd145cb7293cb572fc3 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 15 Jun 2023 16:37:50 +1000 Subject: KVM: SEV: Move SEV's GP_VECTOR intercept setup to SEV Currently SVM setup is done sequentially in init_vmcb() -> sev_init_vmcb() -> sev_es_init_vmcb() and tries keeping SVM/SEV/SEV-ES bits separated. One of the exceptions is #GP intercept which init_vmcb() skips setting for SEV guests and then sev_es_init_vmcb() needlessly clears it. Remove the SEV check from init_vmcb(). Clear the #GP intercept in sev_init_vmcb(). SEV-ES will use the SEV setting. No functional change intended. Suggested-by: Sean Christopherson Signed-off-by: Alexey Kardashevskiy Reviewed-by: Carlos Bilbao Reviewed-by: Tom Lendacky Reviewed-by: Santosh Shukla Link: https://lore.kernel.org/r/20230615063757.3039121-3-aik@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 9 ++++++--- arch/x86/kvm/svm/svm.c | 5 ++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 07756b7348ae..fd6e316c1a23 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2974,9 +2974,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) svm_set_intercept(svm, TRAP_CR4_WRITE); svm_set_intercept(svm, TRAP_CR8_WRITE); - /* No support for enable_vmware_backdoor */ - clr_exception_intercept(svm, GP_VECTOR); - /* Can't intercept XSETBV, HV can't modify XCR0 directly */ svm_clr_intercept(svm, INTERCEPT_XSETBV); @@ -3002,6 +2999,12 @@ void sev_init_vmcb(struct vcpu_svm *svm) svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; clr_exception_intercept(svm, UD_VECTOR); + /* + * Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as + * KVM can't decrypt guest memory to decode the faulting instruction. + */ + clr_exception_intercept(svm, GP_VECTOR); + if (sev_es_guest(svm->vcpu.kvm)) sev_es_init_vmcb(svm); } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 42f17ae99c9e..a0e803f2a574 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1243,10 +1243,9 @@ static void init_vmcb(struct kvm_vcpu *vcpu) * Guest access to VMware backdoor ports could legitimately * trigger #GP because of TSS I/O permission bitmap. * We intercept those #GP and allow access to them anyway - * as VMware does. Don't intercept #GP for SEV guests as KVM can't - * decrypt guest memory to decode the faulting instruction. + * as VMware does. 
*/ - if (enable_vmware_backdoor && !sev_guest(vcpu->kvm)) + if (enable_vmware_backdoor) set_exception_intercept(svm, GP_VECTOR); svm_set_intercept(svm, INTERCEPT_INTR); -- cgit From f8d808ed1ba0019997f18ff2be0cf85e33a31022 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 15 Jun 2023 16:37:51 +1000 Subject: KVM: SVM: Rewrite sev_es_prepare_switch_to_guest()'s comment about swap types Rewrite the comment(s) in sev_es_prepare_switch_to_guest() to explain the swap types employed by the CPU for SEV-ES guests, i.e. to explain why KVM needs to save a seemingly random subset of host state, and to provide a decoder for the APM's Type-A/B/C terminology. Signed-off-by: Alexey Kardashevskiy Link: https://lore.kernel.org/r/20230615063757.3039121-4-aik@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index fd6e316c1a23..d58a25a92aa3 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3023,19 +3023,24 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm) void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa) { /* - * As an SEV-ES guest, hardware will restore the host state on VMEXIT, - * of which one step is to perform a VMLOAD. KVM performs the - * corresponding VMSAVE in svm_prepare_guest_switch for both - * traditional and SEV-ES guests. + * All host state for SEV-ES guests is categorized into three swap types + * based on how it is handled by hardware during a world switch: + * + * A: VMRUN: Host state saved in host save area + * VMEXIT: Host state loaded from host save area + * + * B: VMRUN: Host state _NOT_ saved in host save area + * VMEXIT: Host state loaded from host save area + * + * C: VMRUN: Host state _NOT_ saved in host save area + * VMEXIT: Host state initialized to default(reset) values + * + * Manually save type-B state, i.e. state that is loaded by VMEXIT but + * isn't saved by VMRUN, that isn't already saved by VMSAVE (performed + * by common SVM code). */ - - /* XCR0 is restored on VMEXIT, save the current host value */ hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); - - /* PKRU is restored on VMEXIT, save the current host value */ hostsa->pkru = read_pkru(); - - /* MSR_IA32_XSS is restored on VMEXIT, save the currnet host value */ hostsa->xss = host_xss; } -- cgit From 2837dd00f8fc69111cd6b1dc8481d2fb490d11c9 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 15 Jun 2023 16:37:52 +1000 Subject: KVM: SEV-ES: explicitly disable debug SVM/SEV enable debug register intercepts to skip swapping DRs on entering/exiting the guest. When the guest is in control of debug registers (vcpu->guest_debug == 0), there is an optimisation to reduce the number of context switches: intercepts are cleared and the KVM_DEBUGREG_WONT_EXIT flag is set to tell KVM to do swapping on guest enter/exit. The same code also executes for SEV-ES, however it has no effect as - it always takes the (vcpu->guest_debug == 0) branch; - KVM_DEBUGREG_WONT_EXIT is set but the DR7 intercept is not cleared; - vcpu_enter_guest() writes DRs but VMRUN for SEV-ES swaps them with the values from the _encrypted_ VMSA. Be explicit about SEV-ES not supporting debug: - return right away from dr_interception() and skip unnecessary processing; - return an error right away from the KVM_SEV_LAUNCH_UPDATE_VMSA handler if debugging was already enabled. 
KVM_SET_GUEST_DEBUG already fails once KVM_SEV_LAUNCH_UPDATE_VMSA has finished, because vcpu->arch.guest_state_protected is set to true. Add a WARN_ON to kvm_x86::sync_dirty_debug_regs() (which saves guest DRs on guest exit) to signify that SEV-ES won't hit that path. Suggested-by: Sean Christopherson Signed-off-by: Alexey Kardashevskiy Link: https://lore.kernel.org/r/20230615063757.3039121-5-aik@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 5 +++++ arch/x86/kvm/svm/svm.c | 9 ++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index d58a25a92aa3..0299bbe188f1 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -619,6 +619,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, struct vcpu_svm *svm = to_svm(vcpu); int ret; + if (vcpu->guest_debug) { + pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported"); + return -EINVAL; + } + /* Perform some pre-encryption checks against the VMSA */ ret = sev_es_sync_vmsa(svm); if (ret) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index a0e803f2a574..e838cfecc0fa 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1983,7 +1983,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - if (vcpu->arch.guest_state_protected) + if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm))) return; get_debugreg(vcpu->arch.db[0], 0); @@ -2714,6 +2714,13 @@ static int dr_interception(struct kvm_vcpu *vcpu) unsigned long val; int err = 0; + /* + * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT + * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early. + */ + if (sev_es_guest(vcpu->kvm)) + return 1; + if (vcpu->guest_debug == 0) { /* * No more DR vmexits; force a reload of the debug registers -- cgit From c2690b5f01948dfec19d50086312848c0b5b02b0 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 15 Jun 2023 16:37:53 +1000 Subject: KVM: SVM/SEV/SEV-ES: Rework intercepts Currently SVM setup is done sequentially in init_vmcb() -> sev_init_vmcb() -> sev_es_init_vmcb() and tries keeping SVM/SEV/SEV-ES bits separated. One of the exceptions is the DR intercepts, which are configured for SEV-ES before sev_es_init_vmcb() runs. Move the SEV-ES intercept setup to sev_es_init_vmcb(). From now on set_dr_intercepts()/clr_dr_intercepts() handle SVM/SEV only. No functional change intended. 
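As a standalone toy sketch of the layering this patch restores (hypothetical types and bit encodings, not the real VMCB layout): the generic layer sets the full DR intercept policy, and the SEV-ES layer that runs afterwards overrides the part it owns, so the generic layer needs no SEV-ES checks.

  #include <stdio.h>

  struct toy_vmcb { unsigned int dr_intercepts; };

  #define DR_INTERCEPT_ALL     0xffffffffu    /* illustrative encoding only */
  #define DR_INTERCEPT_DR7_RW  (3u << 14)     /* illustrative encoding only */

  static void toy_init_vmcb(struct toy_vmcb *v)
  {
          v->dr_intercepts = DR_INTERCEPT_ALL;    /* generic SVM/SEV policy */
  }

  static void toy_sev_es_init_vmcb(struct toy_vmcb *v)
  {
          v->dr_intercepts = DR_INTERCEPT_DR7_RW; /* SEV-ES keeps only DR7 */
  }

  int main(void)
  {
          struct toy_vmcb v;

          toy_init_vmcb(&v);          /* runs first, free of SEV-ES checks */
          toy_sev_es_init_vmcb(&v);   /* the later, more specific layer wins */
          printf("%#x\n", v.dr_intercepts);
          return 0;
  }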
Suggested-by: Sean Christopherson Signed-off-by: Alexey Kardashevskiy Reviewed-by: Santosh Shukla Reviewed-by: Tom Lendacky Link: https://lore.kernel.org/r/20230615063757.3039121-6-aik@amd.com [sean: drop comment about intercepting DR7] Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 6 ++++++ arch/x86/kvm/svm/svm.c | 37 ++++++++++++++----------------------- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0299bbe188f1..b3ef509f6fda 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2951,6 +2951,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) static void sev_es_init_vmcb(struct vcpu_svm *svm) { + struct vmcb *vmcb = svm->vmcb01.ptr; struct kvm_vcpu *vcpu = &svm->vcpu; svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; @@ -2979,6 +2980,11 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) svm_set_intercept(svm, TRAP_CR4_WRITE); svm_set_intercept(svm, TRAP_CR8_WRITE); + vmcb->control.intercepts[INTERCEPT_DR] = 0; + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); + recalc_intercepts(svm); + /* Can't intercept XSETBV, HV can't modify XCR0 directly */ svm_clr_intercept(svm, INTERCEPT_XSETBV); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e838cfecc0fa..8fad9ebb5126 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -681,23 +681,20 @@ static void set_dr_intercepts(struct vcpu_svm *svm) { struct vmcb *vmcb = svm->vmcb01.ptr; - if (!sev_es_guest(svm->vcpu.kvm)) { - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE); - } - + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE); vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); @@ -710,12 +707,6 @@ static void clr_dr_intercepts(struct vcpu_svm *svm) vmcb->control.intercepts[INTERCEPT_DR] = 0; - /* DR7 access must remain intercepted for 
an SEV-ES guest */ - if (sev_es_guest(svm->vcpu.kvm)) { - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); - } - recalc_intercepts(svm); } -- cgit From d1f85fbe836e61ed330a242f927a98abd98929cc Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 15 Jun 2023 16:37:54 +1000 Subject: KVM: SEV: Enable data breakpoints in SEV-ES Add support for "DebugSwap for SEV-ES guests", which provides support for swapping DR[0-3] and DR[0-3]_ADDR_MASK on VMRUN and VMEXIT, i.e. allows KVM to expose debug capabilities to SEV-ES guests. Without DebugSwap support, the CPU doesn't save/load most _guest_ debug registers (except DR6/7), and KVM cannot manually context switch guest DRs due the VMSA being encrypted. Enable DebugSwap if and only if the CPU also supports NoNestedDataBp, which causes the CPU to ignore nested #DBs, i.e. #DBs that occur when vectoring a #DB. Without NoNestedDataBp, a malicious guest can DoS the host by putting the CPU into an infinite loop of vectoring #DBs (see https://bugzilla.redhat.com/show_bug.cgi?id=1278496) Set the features bit in sev_es_sync_vmsa() which is the last point when VMSA is not encrypted yet as sev_(es_)init_vmcb() (where the most init happens) is called not only when VCPU is initialised but also on intrahost migration when VMSA is encrypted. Eliminate DR7 intercepts as KVM can't modify guest DR7, and intercepting DR7 would completely defeat the purpose of enabling DebugSwap. Make X86_FEATURE_DEBUG_SWAP appear in /proc/cpuinfo (by not adding "") to let the operator know if the VM can debug. Signed-off-by: Alexey Kardashevskiy Link: https://lore.kernel.org/r/20230615063757.3039121-7-aik@amd.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/svm.h | 1 + arch/x86/kvm/svm/sev.c | 36 +++++++++++++++++++++++++++++--- tools/arch/x86/include/asm/cpufeatures.h | 1 + 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index cb8ca46213be..31c862d79fae 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -434,6 +434,7 @@ #define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */ #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ +#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index e7c7379d6ac7..72ebd5e4e975 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -288,6 +288,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_ #define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF) +#define SVM_SEV_FEAT_DEBUG_SWAP BIT(5) struct vmcb_seg { u16 selector; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index b3ef509f6fda..b7cd0cc4a19c 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "mmu.h" #include "x86.h" @@ -54,9 +55,14 @@ module_param_named(sev, sev_enabled, bool, 0444); /* enable/disable SEV-ES support */ static bool sev_es_enabled = true; module_param_named(sev_es, 
sev_es_enabled, bool, 0444); + +/* enable/disable SEV-ES DebugSwap support */ +static bool sev_es_debug_swap_enabled = true; +module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444); #else #define sev_enabled false #define sev_es_enabled false +#define sev_es_debug_swap_enabled false #endif /* CONFIG_KVM_AMD_SEV */ static u8 sev_enc_bit; @@ -606,6 +612,9 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm) save->xss = svm->vcpu.arch.ia32_xss; save->dr6 = svm->vcpu.arch.dr6; + if (sev_es_debug_swap_enabled) + save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP; + pr_debug("Virtual Machine Save Area (VMSA):\n"); print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false); @@ -2261,6 +2270,9 @@ out: sev_enabled = sev_supported; sev_es_enabled = sev_es_supported; + if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) || + !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP)) + sev_es_debug_swap_enabled = false; #endif } @@ -2981,9 +2993,11 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) svm_set_intercept(svm, TRAP_CR8_WRITE); vmcb->control.intercepts[INTERCEPT_DR] = 0; - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); - vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); - recalc_intercepts(svm); + if (!sev_es_debug_swap_enabled) { + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); + vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); + recalc_intercepts(svm); + } /* Can't intercept XSETBV, HV can't modify XCR0 directly */ svm_clr_intercept(svm, INTERCEPT_XSETBV); @@ -3053,6 +3067,22 @@ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa) hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); hostsa->pkru = read_pkru(); hostsa->xss = host_xss; + + /* + * If DebugSwap is enabled, debug registers are loaded but NOT saved by + * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both + * saves and loads debug registers (Type-A). + */ + if (sev_es_debug_swap_enabled) { + hostsa->dr0 = native_get_debugreg(0); + hostsa->dr1 = native_get_debugreg(1); + hostsa->dr2 = native_get_debugreg(2); + hostsa->dr3 = native_get_debugreg(3); + hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0); + hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1); + hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2); + hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3); + } } void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index cb8ca46213be..31c862d79fae 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -434,6 +434,7 @@ #define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */ #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ +#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ -- cgit From 90cbf6d914ad7856ca1145dee02babb9eab7bec1 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 15 Jun 2023 16:37:55 +1000 Subject: KVM: SEV-ES: Eliminate #DB intercept when DebugSwap enabled Disable #DB for SEV-ES guests when DebugSwap is enabled. There is no point in such intercept as KVM does not allow guest debug for SEV-ES guests. 
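The decision reduces to a simple predicate; a hedged standalone sketch with made-up names (want_db_intercept() is invented for illustration):

  #include <stdbool.h>
  #include <stdio.h>

  /* Intercept #DB only when KVM may actually need it: for SEV-ES with
   * DebugSwap, KVM can't debug the guest and the CPU ignores nested #DBs,
   * so the intercept buys nothing. */
  static bool want_db_intercept(bool sev_es, bool debug_swap)
  {
          return !(sev_es && debug_swap);
  }

  int main(void)
  {
          printf("SEV-ES + DebugSwap: %d\n", want_db_intercept(true, true));
          printf("SEV-ES only:        %d\n", want_db_intercept(true, false));
          return 0;
  }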
Signed-off-by: Alexey Kardashevskiy Link: https://lore.kernel.org/r/20230615063757.3039121-8-aik@amd.com [sean: add comment as to why KVM disables #DB intercept iff DebugSwap=1] Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index b7cd0cc4a19c..b35cd670ce66 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2997,6 +2997,17 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); recalc_intercepts(svm); + } else { + /* + * Disable #DB intercept iff DebugSwap is enabled. KVM doesn't + * allow debugging SEV-ES guests, and enables DebugSwap iff + * NO_NESTED_DATA_BP is supported, so there's no reason to + * intercept #DB when DebugSwap is enabled. For simplicity + * with respect to guest debug, intercept #DB for other VMs + * even if NO_NESTED_DATA_BP is supported, i.e. even if the + * guest can't DoS the CPU with infinite #DB vectoring. + */ + clr_exception_intercept(svm, DB_VECTOR); } /* Can't intercept XSETBV, HV can't modify XCR0 directly */ -- cgit From 389fbbec261b2842fd0e34b26a2b288b122cc406 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 15 Jun 2023 16:37:56 +1000 Subject: KVM: SVM: Don't defer NMI unblocking until next exit for SEV-ES guests Immediately mark NMIs as unmasked in response to #VMGEXIT(NMI complete) instead of setting awaiting_iret_completion and waiting until the *next* VM-Exit to unmask NMIs. The whole point of "NMI complete" is that the guest is responsible for telling the hypervisor when it's safe to inject an NMI, i.e. there's no need to wait. And because there's no IRET to single-step, the next VM-Exit could be a long time coming, i.e. KVM could incorrectly hold an NMI pending for far longer than what is required and expected. Opportunistically fix a stale reference to HF_IRET_MASK. 
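A toy model of the before/after behavior (illustrative state only, not KVM's real structures):

  #include <stdbool.h>
  #include <stdio.h>

  struct toy_vcpu {
          bool nmi_masked;
          bool awaiting_iret_completion;
  };

  static void handle_nmi_complete(struct toy_vcpu *v)
  {
          /* Old behavior: set v->awaiting_iret_completion = true and defer
           * the unmask to the *next* exit, which for SEV-ES may be
           * arbitrarily far in the future. */
          v->nmi_masked = false;  /* new behavior: unmask immediately */
  }

  int main(void)
  {
          struct toy_vcpu v = { .nmi_masked = true };

          handle_nmi_complete(&v);        /* guest said it's safe */
          printf("nmi_masked=%d\n", v.nmi_masked);
          return 0;
  }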
Fixes: 916b54a7688b ("KVM: x86: Move HF_NMI_MASK and HF_IRET_MASK into "struct vcpu_svm"") Fixes: 4444dfe4050b ("KVM: SVM: Add NMI support for an SEV-ES guest") Cc: Tom Lendacky Link: https://lore.kernel.org/r/20230615063757.3039121-9-aik@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 5 ++++- arch/x86/kvm/svm/svm.c | 10 +++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index b35cd670ce66..2cd15783dfb9 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2900,7 +2900,10 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) svm->sev_es.ghcb_sa); break; case SVM_VMGEXIT_NMI_COMPLETE: - ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET); + ++vcpu->stat.nmi_window_exits; + svm->nmi_masked = false; + kvm_make_request(KVM_REQ_EVENT, vcpu); + ret = 1; break; case SVM_VMGEXIT_AP_HLT_LOOP: ret = kvm_emulate_ap_reset_hold(vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8fad9ebb5126..b15bc158ecc6 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2535,12 +2535,13 @@ static int iret_interception(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + WARN_ON_ONCE(sev_es_guest(vcpu->kvm)); + ++vcpu->stat.nmi_window_exits; svm->awaiting_iret_completion = true; svm_clr_iret_intercept(svm); - if (!sev_es_guest(vcpu->kvm)) - svm->nmi_iret_rip = kvm_rip_read(vcpu); + svm->nmi_iret_rip = kvm_rip_read(vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; @@ -3950,12 +3951,11 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu) svm->soft_int_injected = false; /* - * If we've made progress since setting HF_IRET_MASK, we've + * If we've made progress since setting awaiting_iret_completion, we've * executed an IRET and can allow NMI injection. */ if (svm->awaiting_iret_completion && - (sev_es_guest(vcpu->kvm) || - kvm_rip_read(vcpu) != svm->nmi_iret_rip)) { + kvm_rip_read(vcpu) != svm->nmi_iret_rip) { svm->awaiting_iret_completion = false; svm->nmi_masked = false; kvm_make_request(KVM_REQ_EVENT, vcpu); -- cgit From a6bb5709029756a6151afed0eec26ba2bfabed51 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 15 Jun 2023 16:37:57 +1000 Subject: KVM: SVM: Don't try to pointlessly single-step SEV-ES guests for NMI window Bail early from svm_enable_nmi_window() for SEV-ES guests without trying to enable single-stepping of the guest, as single-stepping an SEV-ES guest is impossible and the guest is responsible for *telling* KVM when it is ready for a new NMI to be injected. Functionally, setting TF and RF in svm->vmcb->save.rflags is benign as the field is ignored by hardware, but it's all kinds of confusing. Signed-off-by: Alexey Kardashevskiy Link: https://lore.kernel.org/r/20230615063757.3039121-10-aik@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index b15bc158ecc6..1bc0936bbd51 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3802,6 +3802,19 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion) return; /* IRET will cause a vm exit */ + /* + * SEV-ES guests are responsible for signaling when a vCPU is ready to + * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e. + * KVM can't intercept and single-step IRET to detect when NMIs are + * unblocked (architecturally speaking). See SVM_VMGEXIT_NMI_COMPLETE. 
+ * + * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware + * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not + * supported NAEs in the GHCB protocol. + */ + if (sev_es_guest(vcpu->kvm)) + return; + if (!gif_set(svm)) { if (vgif) svm_set_intercept(svm, INTERCEPT_STGI); -- cgit From 1d6664fadda3e028b8220ba1ba487a928e801825 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Sun, 25 Jun 2023 15:34:38 +0800 Subject: KVM: x86: Use sysfs_emit() instead of sprintf() Use sysfs_emit() instead of the sprintf() for sysfs entries. sysfs_emit() knows the maximum of the temporary buffer used for outputting sysfs content and avoids overrunning the buffer length. Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230625073438.57427-1-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/vmx/vmx.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ec169f5c7dce..31d9cbe817a2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -6862,7 +6862,7 @@ static void mmu_destroy_caches(void) static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp) { if (nx_hugepage_mitigation_hard_disabled) - return sprintf(buffer, "never\n"); + return sysfs_emit(buffer, "never\n"); return param_get_bool(buffer, kp); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 0ecf4be2c6af..254f2296e549 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -366,9 +366,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) { if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) - return sprintf(s, "???\n"); + return sysfs_emit(s, "???\n"); - return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); + return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); } static void vmx_setup_fb_clear_ctrl(void) -- cgit From 0d033770d43a7aa36025bcef9a60da7fb750f735 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Fri, 28 Jul 2023 02:12:57 +0200 Subject: KVM: x86: Fix KVM_CAP_SYNC_REGS's sync_regs() TOCTOU issues In a spirit of using a sledgehammer to crack a nut, make sync_regs() feed __set_sregs() and kvm_vcpu_ioctl_x86_set_vcpu_events() with kernel's own copy of data. Both __set_sregs() and kvm_vcpu_ioctl_x86_set_vcpu_events() assume they have exclusive rights to structs they operate on. While this is true when coming from an ioctl handler (caller makes a local copy of user's data), sync_regs() breaks this contract; a pointer to a user-modifiable memory (vcpu->run->s.regs) is provided. This can lead to a situation when incoming data is checked and/or sanitized only to be re-set by a user thread running in parallel. 
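The essence of the fix, as a standalone sketch (toy validation and invented names, not the real x86.c code): take one snapshot of the shared buffer and validate only the snapshot, so a racing writer cannot change the data between check and use.

  #include <stdio.h>
  #include <string.h>

  struct toy_sregs { unsigned long cr4; };

  static int set_sregs_checked(struct toy_sregs *dst, const struct toy_sregs *src)
  {
          if (src->cr4 > 0xffffUL)        /* stand-in sanity check */
                  return -1;
          dst->cr4 = src->cr4;
          return 0;
  }

  /* 'shared' stands in for vcpu->run->s.regs, which another userspace
   * thread can rewrite at any moment. */
  static int sync_regs_fixed(struct toy_sregs *state, volatile struct toy_sregs *shared)
  {
          struct toy_sregs snap;

          memcpy(&snap, (const void *)shared, sizeof(snap)); /* single fetch */
          return set_sregs_checked(state, &snap);            /* check the copy */
  }

  int main(void)
  {
          struct toy_sregs state = { 0 };
          struct toy_sregs shared = { .cr4 = 0x20 };

          printf("%d\n", sync_regs_fixed(&state, &shared));
          return 0;
  }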
Signed-off-by: Michal Luczaj Fixes: 01643c51bfcf ("KVM: x86: KVM_CAP_SYNC_REGS") Link: https://lore.kernel.org/r/20230728001606.2275586-2-mhal@rbox.co Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a6b9bea62fb8..0145d844283b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11777,15 +11777,22 @@ static int sync_regs(struct kvm_vcpu *vcpu) __set_regs(vcpu, &vcpu->run->s.regs.regs); vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; } + if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { - if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) + struct kvm_sregs sregs = vcpu->run->s.regs.sregs; + + if (__set_sregs(vcpu, &sregs)) return -EINVAL; + vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; } + if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { - if (kvm_vcpu_ioctl_x86_set_vcpu_events( - vcpu, &vcpu->run->s.regs.events)) + struct kvm_vcpu_events events = vcpu->run->s.regs.events; + + if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events)) return -EINVAL; + vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; } -- cgit From ae895cbe613a5b193f3953c90c075306bfa4d30b Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Fri, 28 Jul 2023 02:12:58 +0200 Subject: KVM: selftests: Extend x86's sync_regs_test to check for CR4 races Attempt to modify vcpu->run->s.regs _after_ the sanity checks performed by KVM_CAP_SYNC_REGS's arch/x86/kvm/x86.c:sync_regs(). This can lead to some nonsensical vCPU states accompanied by kernel splats, e.g. disabling PAE while long mode is enabled makes KVM all kinds of confused: WARNING: CPU: 0 PID: 1142 at arch/x86/kvm/mmu/paging_tmpl.h:358 paging32_walk_addr_generic+0x431/0x8f0 [kvm] arch/x86/kvm/mmu/paging_tmpl.h: KVM_BUG_ON(is_long_mode(vcpu) && !is_pae(vcpu), vcpu->kvm) Signed-off-by: Michal Luczaj Link: https://lore.kernel.org/r/20230728001606.2275586-3-mhal@rbox.co [sean: see link] Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/x86_64/sync_regs_test.c | 72 ++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 2da89fdc2471..13ac3ae8e704 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "test_util.h" #include "kvm_util.h" @@ -80,6 +81,75 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left, #define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS) #define INVALID_SYNC_FIELD 0x80000000 +/* + * Toggle CR4.PAE while KVM is processing SREGS, EFER.LME=1 with CR4.PAE=0 is + * illegal, and KVM's MMU heavily relies on vCPU state being valid. 
+ */ +static noinline void *race_sregs_cr4(void *arg) +{ + struct kvm_run *run = (struct kvm_run *)arg; + __u64 *cr4 = &run->s.regs.sregs.cr4; + __u64 pae_enabled = *cr4; + __u64 pae_disabled = *cr4 & ~X86_CR4_PAE; + + for (;;) { + WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS); + WRITE_ONCE(*cr4, pae_enabled); + asm volatile(".rept 512\n\t" + "nop\n\t" + ".endr"); + WRITE_ONCE(*cr4, pae_disabled); + + pthread_testcancel(); + } + + return NULL; +} + +static void race_sync_regs(void *racer) +{ + const time_t TIMEOUT = 2; /* seconds, roughly */ + struct kvm_translation tr; + struct kvm_vcpu *vcpu; + struct kvm_run *run; + struct kvm_vm *vm; + pthread_t thread; + time_t t; + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + run = vcpu->run; + + run->kvm_valid_regs = KVM_SYNC_X86_SREGS; + vcpu_run(vcpu); + run->kvm_valid_regs = 0; + + /* + * Selftests run 64-bit guests by default, both EFER.LME and CR4.PAE + * should already be set in guest state. + */ + TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) && + (run->s.regs.sregs.efer & EFER_LME), + "vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d", + !!(run->s.regs.sregs.cr4 & X86_CR4_PAE), + !!(run->s.regs.sregs.efer & EFER_LME)); + + ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0); + + for (t = time(NULL) + TIMEOUT; time(NULL) < t;) { + __vcpu_run(vcpu); + + if (racer == race_sregs_cr4) { + tr = (struct kvm_translation) { .linear_address = 0 }; + __vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr); + } + } + + ASSERT_EQ(pthread_cancel(thread), 0); + ASSERT_EQ(pthread_join(thread, NULL), 0); + + kvm_vm_free(vm); +} + int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; @@ -218,5 +288,7 @@ int main(int argc, char *argv[]) kvm_vm_free(vm); + race_sync_regs(race_sregs_cr4); + return 0; } -- cgit From 60c4063b475215321fc63ab253c9a840bb664f35 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Fri, 28 Jul 2023 02:12:58 +0200 Subject: KVM: selftests: Extend x86's sync_regs_test to check for event vector races Attempt to modify the to-be-injected exception vector to an illegal value _after_ the sanity checks performed by KVM_CAP_SYNC_REGS's arch/x86/kvm/x86.c:kvm_vcpu_ioctl_x86_set_vcpu_events(). Buggy KVM versions will eventually yell loudly about attempting to inject a bogus vector, e.g. WARNING: CPU: 0 PID: 1107 at arch/x86/kvm/x86.c:547 kvm_check_and_inject_events+0x4a0/0x500 [kvm] arch/x86/kvm/x86.c:exception_type(): WARN_ON(vector > 31 || vector == NMI_VECTOR) Signed-off-by: Michal Luczaj Link: https://lore.kernel.org/r/20230728001606.2275586-3-mhal@rbox.co [sean: split to separate patch] Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/x86_64/sync_regs_test.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 13ac3ae8e704..e4ff2eaad7cd 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -81,6 +81,27 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left, #define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS) #define INVALID_SYNC_FIELD 0x80000000 +/* + * Set an invalid exception vector while KVM is processing events. KVM is + * supposed to reject any vector >= 32, as well as NMIs (vector 2). 
+ */ +static void *race_events_exc(void *arg) +{ + struct kvm_run *run = (struct kvm_run *)arg; + struct kvm_vcpu_events *events = &run->s.regs.events; + + for (;;) { + WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS); + WRITE_ONCE(events->flags, 0); + WRITE_ONCE(events->exception.pending, 1); + WRITE_ONCE(events->exception.nr, 255); + + pthread_testcancel(); + } + + return NULL; +} + /* * Toggle CR4.PAE while KVM is processing SREGS, EFER.LME=1 with CR4.PAE=0 is * illegal, and KVM's MMU heavily relies on vCPU state being valid. @@ -289,6 +310,7 @@ int main(int argc, char *argv[]) kvm_vm_free(vm); race_sync_regs(race_sregs_cr4); + race_sync_regs(race_events_exc); return 0; } -- cgit From 0de704d2d6c82c7498fa1b8df66903f8139e3de2 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Fri, 28 Jul 2023 02:12:58 +0200 Subject: KVM: selftests: Extend x86's sync_regs_test to check for exception races Attempt to set the to-be-queued exception to be both pending and injected _after_ KVM_CAP_SYNC_REGS's kvm_vcpu_ioctl_x86_set_vcpu_events() squashes the pending exception (if there's also an injected exception). Buggy KVM versions will eventually yell loudly about having impossible state when processing queued exceptions, e.g. WARNING: CPU: 0 PID: 1115 at arch/x86/kvm/x86.c:10095 kvm_check_and_inject_events+0x220/0x500 [kvm] arch/x86/kvm/x86.c:kvm_check_and_inject_events(): WARN_ON_ONCE(vcpu->arch.exception.injected && vcpu->arch.exception.pending); Signed-off-by: Michal Luczaj Link: https://lore.kernel.org/r/20230728001606.2275586-3-mhal@rbox.co [sean: split to separate patch, massage changelog and comment] Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/x86_64/sync_regs_test.c | 23 ++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index e4ff2eaad7cd..7e5b13869e2e 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -81,6 +81,28 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left, #define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS) #define INVALID_SYNC_FIELD 0x80000000 +/* + * Set an exception as pending *and* injected while KVM is processing events. + * KVM is supposed to ignore/drop pending exceptions if userspace is also + * requesting that an exception be injected. + */ +static void *race_events_inj_pen(void *arg) +{ + struct kvm_run *run = (struct kvm_run *)arg; + struct kvm_vcpu_events *events = &run->s.regs.events; + + for (;;) { + WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS); + WRITE_ONCE(events->flags, 0); + WRITE_ONCE(events->exception.injected, 1); + WRITE_ONCE(events->exception.pending, 1); + + pthread_testcancel(); + } + + return NULL; +} + /* * Set an invalid exception vector while KVM is processing events. KVM is * supposed to reject any vector >= 32, as well as NMIs (vector 2). @@ -311,6 +333,7 @@ int main(int argc, char *argv[]) race_sync_regs(race_sregs_cr4); race_sync_regs(race_events_exc); + race_sync_regs(race_events_inj_pen); return 0; } -- cgit From b859b018aadfd05f0526533fd1bcf04024d01917 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 31 Jul 2023 10:24:05 +0800 Subject: KVM: selftests: use unified time type for comparison With test case kvm_page_table_test, the start time is acquired with time type CLOCK_MONOTONIC_RAW, however the end time in timespec_elapsed() is acquired with time type CLOCK_MONOTONIC. 
This can cause inaccurate elapsed time calculation due to mixing timebases, e.g. LoongArch in particular will see weirdness. Modify kvm_page_table_test to use unified time type CLOCK_MONOTONIC for start time. Signed-off-by: Bibo Mao Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20230731022405.854884-1-maobibo@loongson.cn [sean: massage changelog] Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/kvm_page_table_test.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c index b3b00be1ef82..69f26d80c821 100644 --- a/tools/testing/selftests/kvm/kvm_page_table_test.c +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -200,7 +200,7 @@ static void *vcpu_worker(void *data) if (READ_ONCE(host_quit)) return NULL; - clock_gettime(CLOCK_MONOTONIC_RAW, &start); + clock_gettime(CLOCK_MONOTONIC, &start); ret = _vcpu_run(vcpu); ts_diff = timespec_elapsed(start); @@ -367,7 +367,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) /* Test the stage of KVM creating mappings */ *current_stage = KVM_CREATE_MAPPINGS; - clock_gettime(CLOCK_MONOTONIC_RAW, &start); + clock_gettime(CLOCK_MONOTONIC, &start); vcpus_complete_new_stage(*current_stage); ts_diff = timespec_elapsed(start); @@ -380,7 +380,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) *current_stage = KVM_UPDATE_MAPPINGS; - clock_gettime(CLOCK_MONOTONIC_RAW, &start); + clock_gettime(CLOCK_MONOTONIC, &start); vcpus_complete_new_stage(*current_stage); ts_diff = timespec_elapsed(start); @@ -392,7 +392,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) *current_stage = KVM_ADJUST_MAPPINGS; - clock_gettime(CLOCK_MONOTONIC_RAW, &start); + clock_gettime(CLOCK_MONOTONIC, &start); vcpus_complete_new_stage(*current_stage); ts_diff = timespec_elapsed(start); -- cgit From 7e4966e6e13d0dad5eae934aff8b875a2aa8f469 Mon Sep 17 00:00:00 2001 From: Minjie Du Date: Tue, 4 Jul 2023 20:21:47 +0800 Subject: KVM: selftests: Remove superfluous variable assignment Don't nullify "nodep" to NULL one line before it's set to "tmp". Signed-off-by: Minjie Du Link: https://lore.kernel.org/r/20230704122148.11573-1-duminjie@vivo.com [sean: massage shortlog+changelog] Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/lib/sparsebit.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index 50e0cf41a7dd..88cb6b84e6f3 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -634,7 +634,6 @@ static void node_reduce(struct sparsebit *s, struct node *nodep) tmp = node_prev(s, nodep); node_rm(s, nodep); - nodep = NULL; nodep = tmp; reduction_performed = true; -- cgit From 6d85f51a1f08411617d0bfe12c537ed6eba7f0f4 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 12 Jul 2023 09:59:07 +0200 Subject: KVM: selftests: Rename the ASSERT_EQ macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is already an ASSERT_EQ macro in the file tools/testing/selftests/kselftest_harness.h, so currently KVM selftests can't include test_util.h from the KVM selftests together with that file. Rename the macro in the KVM selftests to TEST_ASSERT_EQ to avoid the problem - it is also more similar to the other macros in test_util.h that way. 
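A reduced, hypothetical illustration of why the rename lets both headers coexist (the macro bodies here are invented for the sketch, not the real harness/test_util definitions):

  #include <stdio.h>

  /* kselftest_harness.h flavor keeps the short name... */
  #define ASSERT_EQ(a, b)         ((void)0)

  /* ...while the KVM selftests flavor now uses a distinct name, so
   * including both headers no longer redefines the same macro. */
  #define TEST_ASSERT_EQ(a, b)                            \
          do {                                            \
                  if ((a) != (b))                         \
                          printf("assert failed\n");      \
          } while (0)

  int main(void)
  {
          ASSERT_EQ(1, 1);        /* both names now coexist */
          TEST_ASSERT_EQ(2, 2);
          return 0;
  }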
Suggested-by: Sean Christopherson Signed-off-by: Thomas Huth Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230712075910.22480-2-thuth@redhat.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/aarch64/aarch32_id_regs.c | 8 +-- .../selftests/kvm/aarch64/page_fault_test.c | 10 ++-- tools/testing/selftests/kvm/include/test_util.h | 4 +- tools/testing/selftests/kvm/lib/kvm_util.c | 2 +- .../testing/selftests/kvm/max_guest_memory_test.c | 2 +- tools/testing/selftests/kvm/s390x/cmma_test.c | 62 +++++++++++----------- tools/testing/selftests/kvm/s390x/memop.c | 6 +-- tools/testing/selftests/kvm/s390x/tprot.c | 4 +- .../kvm/x86_64/dirty_log_page_splitting_test.c | 18 +++---- .../kvm/x86_64/exit_on_emulation_failure_test.c | 2 +- .../selftests/kvm/x86_64/nested_exceptions_test.c | 12 ++--- .../selftests/kvm/x86_64/recalc_apic_map_test.c | 6 +-- .../testing/selftests/kvm/x86_64/sync_regs_test.c | 6 +-- tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c | 32 +++++------ .../vmx_exception_with_invalid_guest_state.c | 2 +- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 3 +- .../selftests/kvm/x86_64/xapic_state_test.c | 8 +-- .../testing/selftests/kvm/x86_64/xen_vmcall_test.c | 20 +++---- 18 files changed, 104 insertions(+), 103 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c index 4951ac53d1f8..b90580840b22 100644 --- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c +++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c @@ -98,7 +98,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu) uint64_t val; vcpu_get_reg(vcpu, reg_id, &val); - ASSERT_EQ(val, 0); + TEST_ASSERT_EQ(val, 0); /* * Expect the ioctl to succeed with no effect on the register @@ -107,7 +107,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu) vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL); vcpu_get_reg(vcpu, reg_id, &val); - ASSERT_EQ(val, 0); + TEST_ASSERT_EQ(val, 0); } } @@ -127,14 +127,14 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu) uint64_t val; vcpu_get_reg(vcpu, reg_id, &val); - ASSERT_EQ(val, 0); + TEST_ASSERT_EQ(val, 0); r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL); TEST_ASSERT(r < 0 && errno == EINVAL, "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno); vcpu_get_reg(vcpu, reg_id, &val); - ASSERT_EQ(val, 0); + TEST_ASSERT_EQ(val, 0); } } diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index df10f1ffa20d..e5bb8767d2cb 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -318,7 +318,7 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg, TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING, "The only expected UFFD mode is MISSING"); - ASSERT_EQ(addr, (uint64_t)args->hva); + TEST_ASSERT_EQ(addr, (uint64_t)args->hva); pr_debug("uffd fault: addr=%p write=%d\n", (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE)); @@ -432,7 +432,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run) region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); hva = (void *)region->region.userspace_addr; - ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr); + TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr); memcpy(hva, run->mmio.data, run->mmio.len); events.mmio_exits += 1; @@ -631,9 +631,9 @@ static void setup_default_handlers(struct test_desc *test) static 
void check_event_counts(struct test_desc *test) { - ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults); - ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits); - ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs); + TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults); + TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits); + TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs); } static void print_test_banner(enum vm_guest_mode mode, struct test_params *p) diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index a6e9f215ce70..e734e52d8a3a 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -53,11 +53,11 @@ void test_assert(bool exp, const char *exp_str, #define TEST_ASSERT(e, fmt, ...) \ test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__) -#define ASSERT_EQ(a, b) do { \ +#define TEST_ASSERT_EQ(a, b) do { \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ TEST_ASSERT(__a == __b, \ - "ASSERT_EQ(%s, %s) failed.\n" \ + "TEST_ASSERT_EQ(%s, %s) failed.\n" \ "\t%s is %#lx\n" \ "\t%s is %#lx", \ #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 9741a7ff6380..3170d7a4520b 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -994,7 +994,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, if (src_type == VM_MEM_SRC_ANONYMOUS_THP) alignment = max(backing_src_pagesz, alignment); - ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz)); + TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz)); /* Add enough memory to align up if necessary */ if (alignment > 1) diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c index feaf2be20ff2..6628dc4dda89 100644 --- a/tools/testing/selftests/kvm/max_guest_memory_test.c +++ b/tools/testing/selftests/kvm/max_guest_memory_test.c @@ -55,7 +55,7 @@ static void rendezvous_with_boss(void) static void run_vcpu(struct kvm_vcpu *vcpu) { vcpu_run(vcpu); - ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); + TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); } static void *vcpu_worker(void *data) diff --git a/tools/testing/selftests/kvm/s390x/cmma_test.c b/tools/testing/selftests/kvm/s390x/cmma_test.c index 1d73e78e8fa7..c8e0a6495a63 100644 --- a/tools/testing/selftests/kvm/s390x/cmma_test.c +++ b/tools/testing/selftests/kvm/s390x/cmma_test.c @@ -237,8 +237,8 @@ static void test_get_cmma_basic(void) /* GET_CMMA_BITS without CMMA enabled should fail */ rc = vm_get_cmma_bits(vm, 0, &errno_out); - ASSERT_EQ(rc, -1); - ASSERT_EQ(errno_out, ENXIO); + TEST_ASSERT_EQ(rc, -1); + TEST_ASSERT_EQ(errno_out, ENXIO); enable_cmma(vm); vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa); @@ -247,31 +247,31 @@ static void test_get_cmma_basic(void) /* GET_CMMA_BITS without migration mode and without peeking should fail */ rc = vm_get_cmma_bits(vm, 0, &errno_out); - ASSERT_EQ(rc, -1); - ASSERT_EQ(errno_out, EINVAL); + TEST_ASSERT_EQ(rc, -1); + TEST_ASSERT_EQ(errno_out, EINVAL); /* GET_CMMA_BITS without migration mode and with peeking should work */ rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out); - ASSERT_EQ(rc, 0); - ASSERT_EQ(errno_out, 0); + TEST_ASSERT_EQ(rc, 0); + TEST_ASSERT_EQ(errno_out, 0); 
enable_dirty_tracking(vm); enable_migration_mode(vm); /* GET_CMMA_BITS with invalid flags */ rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out); - ASSERT_EQ(rc, -1); - ASSERT_EQ(errno_out, EINVAL); + TEST_ASSERT_EQ(rc, -1); + TEST_ASSERT_EQ(errno_out, EINVAL); kvm_vm_free(vm); } static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu) { - ASSERT_EQ(vcpu->run->exit_reason, 13); - ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4); - ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300); - ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000); + TEST_ASSERT_EQ(vcpu->run->exit_reason, 13); + TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4); + TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300); + TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000); } static void test_migration_mode(void) @@ -283,8 +283,8 @@ static void test_migration_mode(void) /* enabling migration mode on a VM without memory should fail */ rc = __enable_migration_mode(vm); - ASSERT_EQ(rc, -1); - ASSERT_EQ(errno, EINVAL); + TEST_ASSERT_EQ(rc, -1); + TEST_ASSERT_EQ(errno, EINVAL); TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off"); errno = 0; @@ -304,8 +304,8 @@ static void test_migration_mode(void) /* migration mode when memslots have dirty tracking off should fail */ rc = __enable_migration_mode(vm); - ASSERT_EQ(rc, -1); - ASSERT_EQ(errno, EINVAL); + TEST_ASSERT_EQ(rc, -1); + TEST_ASSERT_EQ(errno, EINVAL); TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off"); errno = 0; @@ -314,7 +314,7 @@ static void test_migration_mode(void) /* enabling migration mode should work now */ rc = __enable_migration_mode(vm); - ASSERT_EQ(rc, 0); + TEST_ASSERT_EQ(rc, 0); TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on"); errno = 0; @@ -350,7 +350,7 @@ static void test_migration_mode(void) */ vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES); rc = __enable_migration_mode(vm); - ASSERT_EQ(rc, 0); + TEST_ASSERT_EQ(rc, 0); TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on"); errno = 0; @@ -394,9 +394,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm) }; memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf)); vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args); - ASSERT_EQ(args.count, MAIN_PAGE_COUNT); - ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT); - ASSERT_EQ(args.start_gfn, 0); + TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT); + TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT); + TEST_ASSERT_EQ(args.start_gfn, 0); /* ...and then - after a hole - the TEST_DATA memslot should follow */ args = (struct kvm_s390_cmma_log){ @@ -407,9 +407,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm) }; memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf)); vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args); - ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT); - ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN); - ASSERT_EQ(args.remaining, 0); + TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT); + TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN); + TEST_ASSERT_EQ(args.remaining, 0); /* ...and nothing else should be there */ args = (struct kvm_s390_cmma_log){ @@ -420,9 +420,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm) }; memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf)); vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args); - ASSERT_EQ(args.count, 0); - ASSERT_EQ(args.start_gfn, 0); - ASSERT_EQ(args.remaining, 0); + TEST_ASSERT_EQ(args.count, 0); + TEST_ASSERT_EQ(args.start_gfn, 0); + TEST_ASSERT_EQ(args.remaining, 0); } /** @@ -498,11 +498,11 @@ static 
void assert_cmma_dirty(u64 first_dirty_gfn, u64 dirty_gfn_count, const struct kvm_s390_cmma_log *res) { - ASSERT_EQ(res->start_gfn, first_dirty_gfn); - ASSERT_EQ(res->count, dirty_gfn_count); + TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn); + TEST_ASSERT_EQ(res->count, dirty_gfn_count); for (size_t i = 0; i < dirty_gfn_count; i++) - ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */ - ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */ + TEST_ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */ + TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */ } static void test_get_skip_holes(void) diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index 8e4b94d7b8dd..de73dc030905 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -281,8 +281,8 @@ enum stage { if (uc.cmd == UCALL_ABORT) { \ REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \ } \ - ASSERT_EQ(uc.cmd, UCALL_SYNC); \ - ASSERT_EQ(uc.args[1], __stage); \ + TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \ + TEST_ASSERT_EQ(uc.args[1], __stage); \ }) \ static void prepare_mem12(void) @@ -808,7 +808,7 @@ static void test_termination(void) HOST_SYNC(t.vcpu, STAGE_IDLED); MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168)); /* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */ - ASSERT_EQ(teid & teid_mask, 0); + TEST_ASSERT_EQ(teid & teid_mask, 0); kvm_vm_free(t.kvm_vm); } diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c index a9a0b76e5fa4..40d3ea16c052 100644 --- a/tools/testing/selftests/kvm/s390x/tprot.c +++ b/tools/testing/selftests/kvm/s390x/tprot.c @@ -191,8 +191,8 @@ static void guest_code(void) get_ucall(__vcpu, &uc); \ if (uc.cmd == UCALL_ABORT) \ REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \ - ASSERT_EQ(uc.cmd, UCALL_SYNC); \ - ASSERT_EQ(uc.args[1], __stage); \ + TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \ + TEST_ASSERT_EQ(uc.args[1], __stage); \ }) #define HOST_SYNC(vcpu, stage) \ diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c index beb7e2c10211..634c6bfcd572 100644 --- a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c +++ b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c @@ -72,7 +72,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) vcpu_run(vcpu); - ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC); + TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC); vcpu_last_completed_iteration[vcpu_idx] = current_iteration; @@ -179,12 +179,12 @@ static void run_test(enum vm_guest_mode mode, void *unused) * with that capability. 
*/ if (dirty_log_manual_caps) { - ASSERT_EQ(stats_clear_pass[0].hugepages, 0); - ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages); - ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages); + TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0); + TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages); + TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages); } else { - ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0); - ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages); + TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0); + TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages); } /* @@ -192,9 +192,9 @@ static void run_test(enum vm_guest_mode mode, void *unused) * memory again, the page counts should be the same as they were * right after initial population of memory. */ - ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k); - ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m); - ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g); + TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k); + TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m); + TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g); } static void help(char *name) diff --git a/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c index e334844d6e1d..6c2e5e0ceb1f 100644 --- a/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c +++ b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c @@ -35,7 +35,7 @@ int main(int argc, char *argv[]) vcpu_run(vcpu); handle_flds_emulation_failure_exit(vcpu); vcpu_run(vcpu); - ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); + TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); kvm_vm_free(vm); return 0; diff --git a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c index 6502aa23c2f8..5f074a6da90c 100644 --- a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c +++ b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c @@ -247,12 +247,12 @@ int main(int argc, char *argv[]) /* Verify the pending events comes back out the same as it went in. 
*/ vcpu_events_get(vcpu, &events); - ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD, - KVM_VCPUEVENT_VALID_PAYLOAD); - ASSERT_EQ(events.exception.pending, true); - ASSERT_EQ(events.exception.nr, SS_VECTOR); - ASSERT_EQ(events.exception.has_error_code, true); - ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE); + TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD, + KVM_VCPUEVENT_VALID_PAYLOAD); + TEST_ASSERT_EQ(events.exception.pending, true); + TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR); + TEST_ASSERT_EQ(events.exception.has_error_code, true); + TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE); /* * Run for real with the pending #SS, L1 should get a VM-Exit due to diff --git a/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c b/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c index 4c416ebe7d66..cbc92a862ea9 100644 --- a/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c +++ b/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c @@ -57,7 +57,7 @@ int main(void) for (i = 0; i < KVM_MAX_VCPUS; i++) vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC); - ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0); + TEST_ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0); vcpuN = vcpus[KVM_MAX_VCPUS - 1]; for (t = time(NULL) + TIMEOUT; time(NULL) < t;) { @@ -65,8 +65,8 @@ int main(void) vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED); } - ASSERT_EQ(pthread_cancel(thread), 0); - ASSERT_EQ(pthread_join(thread, NULL), 0); + TEST_ASSERT_EQ(pthread_cancel(thread), 0); + TEST_ASSERT_EQ(pthread_join(thread, NULL), 0); kvm_vm_free(vm); diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 7e5b13869e2e..93fac74ca0a7 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -176,7 +176,7 @@ static void race_sync_regs(void *racer) !!(run->s.regs.sregs.cr4 & X86_CR4_PAE), !!(run->s.regs.sregs.efer & EFER_LME)); - ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0); + TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0); for (t = time(NULL) + TIMEOUT; time(NULL) < t;) { __vcpu_run(vcpu); @@ -187,8 +187,8 @@ static void race_sync_regs(void *racer) } } - ASSERT_EQ(pthread_cancel(thread), 0); - ASSERT_EQ(pthread_join(thread, NULL), 0); + TEST_ASSERT_EQ(pthread_cancel(thread), 0); + TEST_ASSERT_EQ(pthread_join(thread, NULL), 0); kvm_vm_free(vm); } diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c index c9f67702f657..9265965bd2cd 100644 --- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c +++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c @@ -103,39 +103,39 @@ int main(void) vm = vm_create_with_one_vcpu(&vcpu, guest_code); val = 0; - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); /* Guest: writes to MSR_IA32_TSC affect both MSRs. */ run_vcpu(vcpu, 1); val = 1ull * GUEST_STEP; - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); /* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. 
*/ run_vcpu(vcpu, 2); val = 2ull * GUEST_STEP; - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); /* * Host: writes to MSR_IA32_TSC set the host-side offset * and therefore do not change MSR_IA32_TSC_ADJUST. */ vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); run_vcpu(vcpu, 3); /* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */ vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); + TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456); /* Restore previous value. */ vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); /* * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the @@ -143,8 +143,8 @@ int main(void) */ run_vcpu(vcpu, 4); val = 3ull * GUEST_STEP; - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); /* * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side @@ -152,8 +152,8 @@ int main(void) */ run_vcpu(vcpu, 5); val = 4ull * GUEST_STEP; - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); - ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val); + TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST); kvm_vm_free(vm); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c index be0bdb8c6f78..a9b827c69f32 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c @@ -50,7 +50,7 @@ static void set_timer(void) timer.it_value.tv_sec = 0; timer.it_value.tv_usec = 200; timer.it_interval = timer.it_value; - ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0); + TEST_ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0); } static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 4c90f76930f9..34efd57c2b32 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -103,7 +103,8 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap) TEST_FAIL("Unexpected ucall: %lu", uc.cmd); } - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities); + TEST_ASSERT_EQ(vcpu_get_msr(vcpu, 
MSR_IA32_PERF_CAPABILITIES), + host_cap.capabilities); vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); diff --git a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c index 396c13f42457..ab75b873a4ad 100644 --- a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c +++ b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c @@ -65,17 +65,17 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val) vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic); vcpu_run(vcpu); - ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC); - ASSERT_EQ(uc.args[1], val); + TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC); + TEST_ASSERT_EQ(uc.args[1], val); vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic); icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) | (u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32; if (!x->is_x2apic) { val &= (-1u | (0xffull << (32 + 24))); - ASSERT_EQ(icr, val & ~APIC_ICR_BUSY); + TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY); } else { - ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY); + TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY); } } diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c index c94cde3b523f..e149d0574961 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c @@ -108,16 +108,16 @@ int main(int argc, char *argv[]) vcpu_run(vcpu); if (run->exit_reason == KVM_EXIT_XEN) { - ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL); - ASSERT_EQ(run->xen.u.hcall.cpl, 0); - ASSERT_EQ(run->xen.u.hcall.longmode, 1); - ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE); - ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1)); - ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2)); - ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3)); - ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4)); - ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5)); - ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6)); + TEST_ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL); + TEST_ASSERT_EQ(run->xen.u.hcall.cpl, 0); + TEST_ASSERT_EQ(run->xen.u.hcall.longmode, 1); + TEST_ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE); + TEST_ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1)); + TEST_ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2)); + TEST_ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3)); + TEST_ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4)); + TEST_ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5)); + TEST_ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6)); run->xen.u.hcall.result = RETVALUE; continue; } -- cgit From b145c58d95fff280c2726d0024005ae0a4ec378d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:11 -0700 Subject: KVM: selftests: Make TEST_ASSERT_EQ() output look like normal TEST_ASSERT() Clean up TEST_ASSERT_EQ() so that the (mostly) raw code is captured in the main assert message, not the helper macro's code. E.g. make this: x86_64/tsc_msrs_test.c:106: __a == __b pid=40470 tid=40470 errno=0 - Success 1 0x000000000040170e: main at tsc_msrs_test.c:106 2 0x0000000000416f23: __libc_start_call_main at libc-start.o:? 3 0x000000000041856f: __libc_start_main_impl at ??:? 4 0x0000000000401ef0: _start at ??:? TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val + 1) failed. 
	rounded_host_rdmsr(MSR_IA32_TSC) is 0
	val + 1 is 0x1

look like this:

  x86_64/tsc_msrs_test.c:106: rounded_host_rdmsr(MSR_IA32_TSC) == val + 1
  pid=5737 tid=5737 errno=0 - Success
     1	0x0000000000401714: main at tsc_msrs_test.c:106
     2	0x0000000000415c23: __libc_start_call_main at libc-start.o:?
     3	0x000000000041726f: __libc_start_main_impl at ??:?
     4	0x0000000000401e60: _start at ??:?
  0 != 0x1 (rounded_host_rdmsr(MSR_IA32_TSC) != val + 1)

Opportunistically clean up the formatting of the entire macro.

Link: https://lore.kernel.org/r/20230729003643.1053367-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/include/test_util.h | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index e734e52d8a3a..a4bea44f990c 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -53,14 +53,13 @@ void test_assert(bool exp, const char *exp_str,
 #define TEST_ASSERT(e, fmt, ...) \
 	test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)

-#define TEST_ASSERT_EQ(a, b) do { \
-	typeof(a) __a = (a); \
-	typeof(b) __b = (b); \
-	TEST_ASSERT(__a == __b, \
-		    "TEST_ASSERT_EQ(%s, %s) failed.\n" \
-		    "\t%s is %#lx\n" \
-		    "\t%s is %#lx", \
-		    #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
+#define TEST_ASSERT_EQ(a, b)						\
+do {									\
+	typeof(a) __a = (a);						\
+	typeof(b) __b = (b);						\
+	test_assert(__a == __b, #a " == " #b, __FILE__, __LINE__,	\
+		    "%#lx != %#lx (%s != %s)",				\
+		    (unsigned long)(__a), (unsigned long)(__b), #a, #b);\
 } while (0)

 #define TEST_ASSERT_KVM_EXIT_REASON(vcpu, expected) do { \
-- cgit

From 6783ca4105a75d1ba4f89b13cef1bc1f677c41e5 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 17:36:12 -0700
Subject: KVM: selftests: Add a shameful hack to preserve/clobber GPRs across ucall

Preserve or clobber all GPRs (except RIP and RSP, as they're saved and
restored via the VMCS) when performing a ucall on x86 to fudge around a
horrific long-standing bug in selftests' nested VMX support where L2's
GPRs are not preserved across a nested VM-Exit.  I.e. if a test triggers
a nested VM-Exit to L1 in response to a ucall, e.g. GUEST_SYNC(), then
L2's GPR state can be corrupted.

The issue manifests as an unexpected #GP in clear_bit() when running the
hyperv_evmcs test due to RBX being used to track the ucall object, and
RBX being clobbered by the nested VM-Exit.

The problematic hyperv_evmcs testcase is where L0 (test's host userspace)
injects an NMI in response to GUEST_SYNC(8) from L2, but the bug could
"randomly" manifest in any test that induces a nested VM-Exit from L0.
The bug hasn't caused failures in the past due to sheer dumb luck.

The obvious fix is to rework the nVMX helpers to save/restore L2 GPRs
across VM-Exit and VM-Enter, but that is a much bigger task and carries
its own risks, e.g. nSVM does save/restore GPRs, but not in a thread-safe
manner, and there is a _lot_ of cleanup that can be done to unify code
for doing VM-Enter on nVMX, nSVM, and eVMCS.
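For illustration, a sketch (hypothetical guest code, not lifted from any
test) of the pattern the hack protects:

  static void l2_guest_code(void)
  {
          /*
           * The compiler is free to keep 'val' in a callee-saved GPR,
           * e.g. RBX.  If the GUEST_SYNC() ucall bounces through L1 and
           * L2's GPRs aren't restored on the next VM-Enter, 'val' is
           * silently corrupted.
           */
          unsigned long val = 0xcafe;

          GUEST_SYNC(8);
          GUEST_ASSERT(val == 0xcafe);  /* can fail without the save/restore */
  }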
Link: https://lore.kernel.org/r/20230729003643.1053367-4-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/lib/x86_64/ucall.c | 32 ++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c index 4d41dc63cc9e..a53df3ece2f8 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c +++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c @@ -14,8 +14,36 @@ void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) void ucall_arch_do_ucall(vm_vaddr_t uc) { - asm volatile("in %[port], %%al" - : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory"); + /* + * FIXME: Revert this hack (the entire commit that added it) once nVMX + * preserves L2 GPRs across a nested VM-Exit. If a ucall from L2, e.g. + * to do a GUEST_SYNC(), lands the vCPU in L1, any and all GPRs can be + * clobbered by L1. Save and restore non-volatile GPRs (clobbering RBP + * in particular is problematic) along with RDX and RDI (which are + * inputs), and clobber volatile GPRs. *sigh* + */ +#define HORRIFIC_L2_UCALL_CLOBBER_HACK \ + "rcx", "rsi", "r8", "r9", "r10", "r11" + + asm volatile("push %%rbp\n\t" + "push %%r15\n\t" + "push %%r14\n\t" + "push %%r13\n\t" + "push %%r12\n\t" + "push %%rbx\n\t" + "push %%rdx\n\t" + "push %%rdi\n\t" + "in %[port], %%al\n\t" + "pop %%rdi\n\t" + "pop %%rdx\n\t" + "pop %%rbx\n\t" + "pop %%r12\n\t" + "pop %%r13\n\t" + "pop %%r14\n\t" + "pop %%r15\n\t" + "pop %%rbp\n\t" + : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory", + HORRIFIC_L2_UCALL_CLOBBER_HACK); } void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) -- cgit From a1c1b55e116c14de4f8a78286b502101f2a1620c Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Fri, 28 Jul 2023 17:36:13 -0700 Subject: KVM: selftests: Add strnlen() to the string overrides Add strnlen() to the string overrides to allow it to be called in the guest. The implementation for strnlen() was taken from the kernel's generic version, lib/string.c. This will be needed when printf() is introduced. Signed-off-by: Aaron Lewis Link: https://lore.kernel.org/r/20230729003643.1053367-5-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/lib/string_override.c | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c692cc86e7da..f6c14ab476ac 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -204,6 +204,7 @@ endif CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ -Wno-gnu-variable-sized-type-not-at-end -MD\ -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \ + -fno-builtin-strnlen \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ -I$( Date: Fri, 28 Jul 2023 17:36:14 -0700 Subject: KVM: selftests: Add guest_snprintf() to KVM selftests Add a local version of guest_snprintf() for use in the guest. Having a local copy allows the guest access to string formatting options without dependencies on LIBC. LIBC is problematic because it heavily relies on both AVX-512 instructions and a TLS, neither of which are guaranteed to be set up in the guest. The file guest_sprintf.c was lifted from arch/x86/boot/printf.c and adapted to work in the guest, including the addition of buffer length. I.e. 
s/sprintf/snprintf/ The functions where prefixed with "guest_" to allow guests to explicitly call them. A string formatted by this function is expected to succeed or die. If something goes wrong during the formatting process a GUEST_ASSERT() will be thrown. Signed-off-by: Aaron Lewis Link: https://lore.kernel.org/all/mtdi6smhur5rqffvpu7qux7mptonw223y2653x2nwzvgm72nlo@zyc4w3kwl3rg [sean: add a link to the discussion of other options] Link: https://lore.kernel.org/r/20230729003643.1053367-6-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/include/test_util.h | 3 + tools/testing/selftests/kvm/lib/guest_sprintf.c | 307 ++++++++++++++++++++++++ 3 files changed, 311 insertions(+) create mode 100644 tools/testing/selftests/kvm/lib/guest_sprintf.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index f6c14ab476ac..f65889f5a083 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -23,6 +23,7 @@ LIBKVM += lib/guest_modes.c LIBKVM += lib/io.c LIBKVM += lib/kvm_util.c LIBKVM += lib/memstress.c +LIBKVM += lib/guest_sprintf.c LIBKVM += lib/rbtree.c LIBKVM += lib/sparsebit.c LIBKVM += lib/test_util.c diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index a4bea44f990c..7a5907da1719 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -185,4 +185,7 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str) return num; } +int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args); +int guest_snprintf(char *buf, int n, const char *fmt, ...); + #endif /* SELFTEST_KVM_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c new file mode 100644 index 000000000000..c4a69d8aeb68 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "test_util.h" +#include "kvm_util.h" +#include "ucall_common.h" + +#define APPEND_BUFFER_SAFE(str, end, v) \ +do { \ + GUEST_ASSERT(str < end); \ + *str++ = (v); \ +} while (0) + +static int isdigit(int ch) +{ + return (ch >= '0') && (ch <= '9'); +} + +static int skip_atoi(const char **s) +{ + int i = 0; + + while (isdigit(**s)) + i = i * 10 + *((*s)++) - '0'; + return i; +} + +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SMALL 32 /* Must be 32 == 0x20 */ +#define SPECIAL 64 /* 0x */ + +#define __do_div(n, base) \ +({ \ + int __res; \ + \ + __res = ((uint64_t) n) % (uint32_t) base; \ + n = ((uint64_t) n) / (uint32_t) base; \ + __res; \ +}) + +static char *number(char *str, const char *end, long num, int base, int size, + int precision, int type) +{ + /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ + static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ + + char tmp[66]; + char c, sign, locase; + int i; + + /* + * locase = 0 or 0x20. ORing digits or letters with 'locase' + * produces same digits or (maybe lowercased) letters + */ + locase = (type & SMALL); + if (type & LEFT) + type &= ~ZEROPAD; + if (base < 2 || base > 16) + return NULL; + c = (type & ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & PLUS) { + sign = '+'; + size--; + } else if (type & SPACE) { + sign = ' '; + size--; + } + } + if (type & SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++] = '0'; + else + while (num != 0) + tmp[i++] = (digits[__do_div(num, base)] | locase); + if (i > precision) + precision = i; + size -= precision; + if (!(type & (ZEROPAD + LEFT))) + while (size-- > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + if (sign) + APPEND_BUFFER_SAFE(str, end, sign); + if (type & SPECIAL) { + if (base == 8) + APPEND_BUFFER_SAFE(str, end, '0'); + else if (base == 16) { + APPEND_BUFFER_SAFE(str, end, '0'); + APPEND_BUFFER_SAFE(str, end, 'x'); + } + } + if (!(type & LEFT)) + while (size-- > 0) + APPEND_BUFFER_SAFE(str, end, c); + while (i < precision--) + APPEND_BUFFER_SAFE(str, end, '0'); + while (i-- > 0) + APPEND_BUFFER_SAFE(str, end, tmp[i]); + while (size-- > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + + return str; +} + +int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args) +{ + char *str, *end; + const char *s; + uint64_t num; + int i, base; + int len; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* + * min. # of digits for integers; max + * number of chars for from string + */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + + end = buf + n; + GUEST_ASSERT(buf < end); + GUEST_ASSERT(n > 0); + + for (str = buf; *fmt; ++fmt) { + if (*fmt != '%') { + APPEND_BUFFER_SAFE(str, end, *fmt); + continue; + } + + /* process flags */ + flags = 0; +repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': + flags |= LEFT; + goto repeat; + case '+': + flags |= PLUS; + goto repeat; + case ' ': + flags |= SPACE; + goto repeat; + case '#': + flags |= SPECIAL; + goto repeat; + case '0': + flags |= ZEROPAD; + goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & LEFT)) + while (--field_width > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + APPEND_BUFFER_SAFE(str, end, + (uint8_t)va_arg(args, int)); + while (--field_width > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + continue; + + case 's': + s = va_arg(args, char *); + len = strnlen(s, precision); + + if (!(flags & LEFT)) + while (len < field_width--) + APPEND_BUFFER_SAFE(str, end, ' '); + for (i = 0; i < len; ++i) + APPEND_BUFFER_SAFE(str, end, *s++); + while (len < field_width--) + APPEND_BUFFER_SAFE(str, end, ' '); + continue; + + case 'p': + if (field_width == -1) { + field_width = 2 * sizeof(void *); + flags |= SPECIAL | SMALL | ZEROPAD; + } + str = number(str, end, + (uint64_t)va_arg(args, void *), 16, + field_width, precision, flags); + continue; + + case 'n': + if 
(qualifier == 'l') { + long *ip = va_arg(args, long *); + *ip = (str - buf); + } else { + int *ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + APPEND_BUFFER_SAFE(str, end, '%'); + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'x': + flags |= SMALL; + case 'X': + base = 16; + break; + + case 'd': + case 'i': + flags |= SIGN; + case 'u': + break; + + default: + APPEND_BUFFER_SAFE(str, end, '%'); + if (*fmt) + APPEND_BUFFER_SAFE(str, end, *fmt); + else + --fmt; + continue; + } + if (qualifier == 'l') + num = va_arg(args, uint64_t); + else if (qualifier == 'h') { + num = (uint16_t)va_arg(args, int); + if (flags & SIGN) + num = (int16_t)num; + } else if (flags & SIGN) + num = va_arg(args, int); + else + num = va_arg(args, uint32_t); + str = number(str, end, num, base, field_width, precision, flags); + } + + GUEST_ASSERT(str < end); + *str = '\0'; + return str - buf; +} + +int guest_snprintf(char *buf, int n, const char *fmt, ...) +{ + va_list va; + int len; + + va_start(va, fmt); + len = guest_vsnprintf(buf, n, fmt, va); + va_end(va); + + return len; +} -- cgit From 215a681710a554b224d743e4a9cd68965889713c Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Fri, 28 Jul 2023 17:36:15 -0700 Subject: KVM: selftests: Add additional pages to the guest to accommodate ucall Add additional pages to the guest to account for the number of pages the ucall headers need. The only reason things worked before is the ucall headers are fairly small. If they were ever to increase in size the guest could run out of memory. This is done in preparation for adding string formatting options to the guest through the ucall framework which increases the size of the ucall headers. Fixes: 426729b2cf2e ("KVM: selftests: Add ucall pool based implementation") Signed-off-by: Aaron Lewis Link: https://lore.kernel.org/r/20230729003643.1053367-7-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/ucall_common.h | 1 + tools/testing/selftests/kvm/lib/kvm_util.c | 4 ++++ tools/testing/selftests/kvm/lib/ucall_common.c | 5 +++++ 3 files changed, 10 insertions(+) diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index 1a6aaef5ccae..bcbb362aa77f 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -34,6 +34,7 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); void ucall(uint64_t cmd, int nargs, ...); uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); +int ucall_nr_pages_required(uint64_t page_size); /* * Perform userspace call without any associated data. This bare call avoids diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 3170d7a4520b..7a8af1821f5d 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -312,6 +312,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus, uint64_t extra_mem_pages) { + uint64_t page_size = vm_guest_mode_params[mode].page_size; uint64_t nr_pages; TEST_ASSERT(nr_runnable_vcpus, @@ -340,6 +341,9 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, */ nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2; + /* Account for the number of pages needed by ucall. 
*/ + nr_pages += ucall_nr_pages_required(page_size); + return vm_adjust_num_guest_pages(mode, nr_pages); } diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c index 2f0e2ea941cc..77ada362273d 100644 --- a/tools/testing/selftests/kvm/lib/ucall_common.c +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -11,6 +11,11 @@ struct ucall_header { struct ucall ucalls[KVM_MAX_VCPUS]; }; +int ucall_nr_pages_required(uint64_t page_size) +{ + return align_up(sizeof(struct ucall_header), page_size) / page_size; +} + /* * ucall_pool holds per-VM values (global data is duplicated by each VM), it * must not be accessed from host code. -- cgit From 57e5c1fef5ecc7c346ff971817ba6985643e0a24 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Fri, 28 Jul 2023 17:36:16 -0700 Subject: KVM: selftests: Add string formatting options to ucall Add more flexibility to guest debugging and testing by adding GUEST_PRINTF() and GUEST_ASSERT_FMT() to the ucall framework. Add a sized buffer to the ucall structure to hold the formatted string, i.e. to allow the guest to easily resolve the string, and thus avoid the ugly pattern of the host side having to make assumptions about the desired format, as well as having to pass around a large number of parameters. The buffer size was chosen to accommodate most use cases, and based on similar usage. E.g. printf() uses the same size buffer in arch/x86/boot/printf.c. And 1KiB ought to be enough for anybody. Signed-off-by: Aaron Lewis [sean: massage changelog, wrap macro param in ()] Link: https://lore.kernel.org/r/20230729003643.1053367-8-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/ucall_common.h | 7 +++++++ tools/testing/selftests/kvm/lib/ucall_common.c | 17 +++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index bcbb362aa77f..b5548aeba9f0 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -13,15 +13,18 @@ enum { UCALL_NONE, UCALL_SYNC, UCALL_ABORT, + UCALL_PRINTF, UCALL_DONE, UCALL_UNHANDLED, }; #define UCALL_MAX_ARGS 7 +#define UCALL_BUFFER_LEN 1024 struct ucall { uint64_t cmd; uint64_t args[UCALL_MAX_ARGS]; + char buffer[UCALL_BUFFER_LEN]; /* Host virtual address of this struct. */ struct ucall *hva; @@ -32,6 +35,7 @@ void ucall_arch_do_ucall(vm_vaddr_t uc); void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); void ucall(uint64_t cmd, int nargs, ...); +void ucall_fmt(uint64_t cmd, const char *fmt, ...); uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); int ucall_nr_pages_required(uint64_t page_size); @@ -47,8 +51,11 @@ int ucall_nr_pages_required(uint64_t page_size); #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) #define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage) +#define GUEST_PRINTF(_fmt, _args...) 
ucall_fmt(UCALL_PRINTF, _fmt, ##_args) #define GUEST_DONE() ucall(UCALL_DONE, 0) +#define REPORT_GUEST_PRINTF(ucall) pr_info("%s", (ucall).buffer) + enum guest_assert_builtin_args { GUEST_ERROR_STRING, GUEST_FILE, diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c index 77ada362273d..b507db91139b 100644 --- a/tools/testing/selftests/kvm/lib/ucall_common.c +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -75,6 +75,23 @@ static void ucall_free(struct ucall *uc) clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use); } +void ucall_fmt(uint64_t cmd, const char *fmt, ...) +{ + struct ucall *uc; + va_list va; + + uc = ucall_alloc(); + uc->cmd = cmd; + + va_start(va, fmt); + guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); + va_end(va); + + ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + + ucall_free(uc); +} + void ucall(uint64_t cmd, int nargs, ...) { struct ucall *uc; -- cgit From 289c2b4db8f336b96d918eb0c548382aee71ed79 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:17 -0700 Subject: KVM: selftests: Add formatted guest assert support in ucall framework Add printf-based GUEST_ASSERT macros and accompanying host-side support to provide an assert-specific versions of GUEST_PRINTF(). To make it easier to parse assert messages, for humans and bots alike, preserve/use the same layout as host asserts, e.g. in the example below, the reported expression, file, line number, and message are from the guest assertion, not the host reporting of the assertion. The call stack still captures the host reporting, but capturing the guest stack is a less pressing concern, i.e. can be done in the future, and an optimal solution would capture *both* the host and guest stacks, i.e. capturing the host stack isn't an outright bug. Running soft int test ==== Test Assertion Failure ==== x86_64/svm_nested_soft_inject_test.c:39: regs->rip != (unsigned long)l2_guest_code_int pid=214104 tid=214104 errno=4 - Interrupted system call 1 0x0000000000401b35: run_test at svm_nested_soft_inject_test.c:191 2 0x00000000004017d2: main at svm_nested_soft_inject_test.c:212 3 0x0000000000415b03: __libc_start_call_main at libc-start.o:? 4 0x000000000041714f: __libc_start_main_impl at ??:? 5 0x0000000000401660: _start at ??:? Expected IRQ at RIP 0x401e50, received IRQ at 0x401e50 Don't bother sharing code between ucall_assert() and ucall_fmt(), as forwarding the variable arguments would either require using macros or building a va_list, i.e. would make the code less readable and/or require just as much copy+paste code anyways. Gate the new macros with a flag so that tests can more or less be switched over one-by-one. The slow conversion won't be perfect, e.g. library code won't pick up the flag, but the only asserts in library code are of the vanilla GUEST_ASSERT() variety, i.e. don't print out variables. Add a temporary alias to GUEST_ASSERT_1() to fudge around ARM's arch_timer.h header using GUEST_ASSERT_1(), thus thwarting any attempt to convert tests one-by-one. 
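As a usage sketch (a hypothetical conversion, mirroring the
svm_nested_soft_inject_test output above; the variable names are
illustrative), a test built with USE_GUEST_ASSERT_PRINTF can report the
interesting state straight from the guest:

  __GUEST_ASSERT(regs->rip != (unsigned long)l2_guest_code_int,
                 "Expected IRQ at RIP 0x%lx, received IRQ at 0x%lx",
                 (unsigned long)l2_guest_code_int, regs->rip);

  GUEST_FAIL("Unexpected exit reason = %u", exit_reason);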
Link: https://lore.kernel.org/r/20230729003643.1053367-9-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/ucall_common.h | 48 ++++++++++++++++++++++ tools/testing/selftests/kvm/lib/ucall_common.c | 22 ++++++++++ 2 files changed, 70 insertions(+) diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index b5548aeba9f0..4ce11c15285a 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -36,6 +36,8 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); void ucall(uint64_t cmd, int nargs, ...); void ucall_fmt(uint64_t cmd, const char *fmt, ...); +void ucall_assert(uint64_t cmd, const char *exp, const char *file, + unsigned int line, const char *fmt, ...); uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); int ucall_nr_pages_required(uint64_t page_size); @@ -63,6 +65,50 @@ enum guest_assert_builtin_args { GUEST_ASSERT_BUILTIN_NARGS }; +#ifdef USE_GUEST_ASSERT_PRINTF +#define ____GUEST_ASSERT(_condition, _exp, _fmt, _args...) \ +do { \ + if (!(_condition)) \ + ucall_assert(UCALL_ABORT, _exp, __FILE__, __LINE__, _fmt, ##_args); \ +} while (0) + +#define __GUEST_ASSERT(_condition, _fmt, _args...) \ + ____GUEST_ASSERT(_condition, #_condition, _fmt, ##_args) + +#define GUEST_ASSERT(_condition) \ + __GUEST_ASSERT(_condition, #_condition) + +#define GUEST_FAIL(_fmt, _args...) \ + ucall_assert(UCALL_ABORT, "Unconditional guest failure", \ + __FILE__, __LINE__, _fmt, ##_args) + +#define GUEST_ASSERT_EQ(a, b) \ +do { \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + ____GUEST_ASSERT(__a == __b, #a " == " #b, "%#lx != %#lx (%s != %s)", \ + (unsigned long)(__a), (unsigned long)(__b), #a, #b); \ +} while (0) + +#define GUEST_ASSERT_NE(a, b) \ +do { \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + ____GUEST_ASSERT(__a != __b, #a " != " #b, "%#lx == %#lx (%s == %s)", \ + (unsigned long)(__a), (unsigned long)(__b), #a, #b); \ +} while (0) + +#define REPORT_GUEST_ASSERT(ucall) \ + test_assert(false, (const char *)(ucall).args[GUEST_ERROR_STRING], \ + (const char *)(ucall).args[GUEST_FILE], \ + (ucall).args[GUEST_LINE], "%s", (ucall).buffer) + +/* FIXME: Drop this alias once the param-based guest asserts are gone. */ +#define GUEST_ASSERT_1(_condition, arg1) \ + __GUEST_ASSERT(_condition, "arg1 = 0x%lx", arg1) + +#else + #define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) \ do { \ if (!(_condition)) \ @@ -129,4 +175,6 @@ do { \ #define REPORT_GUEST_ASSERT_N(ucall, fmt, args...) \ __REPORT_GUEST_ASSERT((ucall), fmt, ##args) +#endif /* USE_GUEST_ASSERT_PRINTF */ + #endif /* SELFTEST_KVM_UCALL_COMMON_H */ diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c index b507db91139b..816a3fa109bf 100644 --- a/tools/testing/selftests/kvm/lib/ucall_common.c +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -75,6 +75,28 @@ static void ucall_free(struct ucall *uc) clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use); } +void ucall_assert(uint64_t cmd, const char *exp, const char *file, + unsigned int line, const char *fmt, ...) 
+{ + struct ucall *uc; + va_list va; + + uc = ucall_alloc(); + uc->cmd = cmd; + + WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp)); + WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file)); + WRITE_ONCE(uc->args[GUEST_LINE], line); + + va_start(va, fmt); + guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); + va_end(va); + + ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + + ucall_free(uc); +} + void ucall_fmt(uint64_t cmd, const char *fmt, ...) { struct ucall *uc; -- cgit From b35f4c73d389d3b04e5edf3d1fd041ed3070228b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 31 Jul 2023 13:30:24 -0700 Subject: KVM: selftests: Add arch ucall.h and inline simple arch hooks Add an architecture specific ucall.h and inline the simple arch hooks, e.g. the init hook for everything except ARM, and the actual "do ucall" hook for everything except x86 (which should be simple, but temporarily isn't due to carrying a workaround). Having a per-arch ucall header will allow adding a #define for the expected KVM exit reason for a ucall that is colocated (for everything except x86) with the ucall itself. Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20230731203026.1192091-2-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/aarch64/ucall.h | 18 ++++++++++++++++++ tools/testing/selftests/kvm/include/riscv/ucall.h | 18 ++++++++++++++++++ tools/testing/selftests/kvm/include/s390x/ucall.h | 17 +++++++++++++++++ tools/testing/selftests/kvm/include/ucall_common.h | 1 + tools/testing/selftests/kvm/include/x86_64/ucall.h | 11 +++++++++++ tools/testing/selftests/kvm/lib/aarch64/ucall.c | 11 +---------- tools/testing/selftests/kvm/lib/riscv/ucall.c | 11 ----------- tools/testing/selftests/kvm/lib/s390x/ucall.c | 10 ---------- tools/testing/selftests/kvm/lib/x86_64/ucall.c | 4 ---- 9 files changed, 66 insertions(+), 35 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/aarch64/ucall.h create mode 100644 tools/testing/selftests/kvm/include/riscv/ucall.h create mode 100644 tools/testing/selftests/kvm/include/s390x/ucall.h create mode 100644 tools/testing/selftests/kvm/include/x86_64/ucall.h diff --git a/tools/testing/selftests/kvm/include/aarch64/ucall.h b/tools/testing/selftests/kvm/include/aarch64/ucall.h new file mode 100644 index 000000000000..fe65fdf4f0d3 --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/ucall.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UCALL_H +#define SELFTEST_KVM_UCALL_H + +#include "kvm_util_base.h" + +/* + * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each + * VM), it must not be accessed from host code. 
+ */ +extern vm_vaddr_t *ucall_exit_mmio_addr; + +static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +{ + WRITE_ONCE(*ucall_exit_mmio_addr, uc); +} + +#endif diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h new file mode 100644 index 000000000000..86ed0500972b --- /dev/null +++ b/tools/testing/selftests/kvm/include/riscv/ucall.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UCALL_H +#define SELFTEST_KVM_UCALL_H + +#include "processor.h" + +static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +{ +} + +static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +{ + sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, + KVM_RISCV_SELFTESTS_SBI_UCALL, + uc, 0, 0, 0, 0, 0); +} + +#endif diff --git a/tools/testing/selftests/kvm/include/s390x/ucall.h b/tools/testing/selftests/kvm/include/s390x/ucall.h new file mode 100644 index 000000000000..47ad4b1fbccb --- /dev/null +++ b/tools/testing/selftests/kvm/include/s390x/ucall.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UCALL_H +#define SELFTEST_KVM_UCALL_H + +#include "kvm_util_base.h" + +static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +{ +} + +static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +{ + /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */ + asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory"); +} + +#endif diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index 4ce11c15285a..9e5948dab030 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -7,6 +7,7 @@ #ifndef SELFTEST_KVM_UCALL_COMMON_H #define SELFTEST_KVM_UCALL_COMMON_H #include "test_util.h" +#include "ucall.h" /* Common ucalls */ enum { diff --git a/tools/testing/selftests/kvm/include/x86_64/ucall.h b/tools/testing/selftests/kvm/include/x86_64/ucall.h new file mode 100644 index 000000000000..05cc69b0d550 --- /dev/null +++ b/tools/testing/selftests/kvm/include/x86_64/ucall.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTEST_KVM_UCALL_H +#define SELFTEST_KVM_UCALL_H + +#include "kvm_util_base.h" + +static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +{ +} + +#endif diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c index f212bd8ab93d..ddab0ce89d4d 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c +++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c @@ -6,11 +6,7 @@ */ #include "kvm_util.h" -/* - * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each - * VM), it must not be accessed from host code. 
- */ -static vm_vaddr_t *ucall_exit_mmio_addr; +vm_vaddr_t *ucall_exit_mmio_addr; void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) { @@ -23,11 +19,6 @@ void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); } -void ucall_arch_do_ucall(vm_vaddr_t uc) -{ - WRITE_ONCE(*ucall_exit_mmio_addr, uc); -} - void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c index 9a3476a2dfca..fe6d1004f018 100644 --- a/tools/testing/selftests/kvm/lib/riscv/ucall.c +++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c @@ -10,10 +10,6 @@ #include "kvm_util.h" #include "processor.h" -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) -{ -} - struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, @@ -40,13 +36,6 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, return ret; } -void ucall_arch_do_ucall(vm_vaddr_t uc) -{ - sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, - KVM_RISCV_SELFTESTS_SBI_UCALL, - uc, 0, 0, 0, 0, 0); -} - void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; diff --git a/tools/testing/selftests/kvm/lib/s390x/ucall.c b/tools/testing/selftests/kvm/lib/s390x/ucall.c index a7f02dc372cf..cca98734653d 100644 --- a/tools/testing/selftests/kvm/lib/s390x/ucall.c +++ b/tools/testing/selftests/kvm/lib/s390x/ucall.c @@ -6,16 +6,6 @@ */ #include "kvm_util.h" -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) -{ -} - -void ucall_arch_do_ucall(vm_vaddr_t uc) -{ - /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */ - asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory"); -} - void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c index a53df3ece2f8..1265cecc7dd1 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c +++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c @@ -8,10 +8,6 @@ #define UCALL_PIO_PORT ((uint16_t)0x1000) -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) -{ -} - void ucall_arch_do_ucall(vm_vaddr_t uc) { /* -- cgit From edb5b700f9f8a21e57aef14cf795fb958cc38628 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 31 Jul 2023 13:30:25 -0700 Subject: KVM: selftests: Add #define of expected KVM exit reason for ucall Define the expected architecture specific exit reason for a successful ucall so that common tests can assert that a ucall occurred without the test needing to implement arch specific code. 
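As a sketch of the intended usage (illustrative, not part of this patch;
assumes a 'vcpu' created by the usual selftest helpers), a common test can
then verify a ucall-induced exit without any arch-specific code:

	struct kvm_run *run = vcpu->run;

	vcpu_run(vcpu);
	TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));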
Suggested-by: Andrew Jones Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20230731203026.1192091-3-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/aarch64/ucall.h | 2 ++ tools/testing/selftests/kvm/include/riscv/ucall.h | 2 ++ tools/testing/selftests/kvm/include/s390x/ucall.h | 2 ++ tools/testing/selftests/kvm/include/x86_64/ucall.h | 2 ++ 4 files changed, 8 insertions(+) diff --git a/tools/testing/selftests/kvm/include/aarch64/ucall.h b/tools/testing/selftests/kvm/include/aarch64/ucall.h index fe65fdf4f0d3..4b68f37efd36 100644 --- a/tools/testing/selftests/kvm/include/aarch64/ucall.h +++ b/tools/testing/selftests/kvm/include/aarch64/ucall.h @@ -4,6 +4,8 @@ #include "kvm_util_base.h" +#define UCALL_EXIT_REASON KVM_EXIT_MMIO + /* * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each * VM), it must not be accessed from host code. diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h index 86ed0500972b..be46eb32ec27 100644 --- a/tools/testing/selftests/kvm/include/riscv/ucall.h +++ b/tools/testing/selftests/kvm/include/riscv/ucall.h @@ -4,6 +4,8 @@ #include "processor.h" +#define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI + static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) { } diff --git a/tools/testing/selftests/kvm/include/s390x/ucall.h b/tools/testing/selftests/kvm/include/s390x/ucall.h index 47ad4b1fbccb..b231bf2e49d6 100644 --- a/tools/testing/selftests/kvm/include/s390x/ucall.h +++ b/tools/testing/selftests/kvm/include/s390x/ucall.h @@ -4,6 +4,8 @@ #include "kvm_util_base.h" +#define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC + static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) { } diff --git a/tools/testing/selftests/kvm/include/x86_64/ucall.h b/tools/testing/selftests/kvm/include/x86_64/ucall.h index 05cc69b0d550..06b244bd06ee 100644 --- a/tools/testing/selftests/kvm/include/x86_64/ucall.h +++ b/tools/testing/selftests/kvm/include/x86_64/ucall.h @@ -4,6 +4,8 @@ #include "kvm_util_base.h" +#define UCALL_EXIT_REASON KVM_EXIT_IO + static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) { } -- cgit From 5d1d46f9d56fb567718175cb0cb7a084d444a9d4 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Mon, 31 Jul 2023 13:30:26 -0700 Subject: KVM: selftests: Add a selftest for guest prints and formatted asserts Add a test to exercise the various features in KVM selftest's local snprintf() and compare them to LIBC's snprintf() to ensure they behave the same. This is not an exhaustive test. KVM's local snprintf() does not implement all the features LIBC does, e.g. KVM's local snprintf() does not support floats or doubles, so testing for those features were excluded. Testing was added for the features that are expected to work to support a minimal version of printf() in the guest. 
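Roughly, each case boils down to the following round-trip (a simplified
sketch; the real test generates the per-type helpers with macros):

  guest:
	GUEST_PRINTF("Got params a = %ld and b = %ld", a, b);

  host:
	snprintf(expected, UCALL_BUFFER_LEN,
		 "Got params a = %ld and b = %ld", a, b);
	...
	case UCALL_PRINTF:
		TEST_ASSERT(!strcmp(uc.buffer, expected), ...);

i.e. the guest's snprintf() output is compared byte-for-byte against what
the host's LIBC produced for the same format string and values.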
Signed-off-by: Aaron Lewis [sean: use UCALL_EXIT_REASON, enable for all architectures] Link: https://lore.kernel.org/r/20230731203026.1192091-4-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile | 4 + tools/testing/selftests/kvm/guest_print_test.c | 221 +++++++++++++++++++++++++ 2 files changed, 225 insertions(+) create mode 100644 tools/testing/selftests/kvm/guest_print_test.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index f65889f5a083..77026907968f 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -123,6 +123,7 @@ TEST_GEN_PROGS_x86_64 += access_tracking_perf_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test +TEST_GEN_PROGS_x86_64 += guest_print_test TEST_GEN_PROGS_x86_64 += hardware_disable_test TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus TEST_GEN_PROGS_x86_64 += kvm_page_table_test @@ -153,6 +154,7 @@ TEST_GEN_PROGS_aarch64 += access_tracking_perf_test TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test +TEST_GEN_PROGS_aarch64 += guest_print_test TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus TEST_GEN_PROGS_aarch64 += kvm_page_table_test TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test @@ -169,6 +171,7 @@ TEST_GEN_PROGS_s390x += s390x/tprot TEST_GEN_PROGS_s390x += s390x/cmma_test TEST_GEN_PROGS_s390x += demand_paging_test TEST_GEN_PROGS_s390x += dirty_log_test +TEST_GEN_PROGS_s390x += guest_print_test TEST_GEN_PROGS_s390x += kvm_create_max_vcpus TEST_GEN_PROGS_s390x += kvm_page_table_test TEST_GEN_PROGS_s390x += rseq_test @@ -177,6 +180,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test TEST_GEN_PROGS_riscv += demand_paging_test TEST_GEN_PROGS_riscv += dirty_log_test +TEST_GEN_PROGS_riscv += guest_print_test TEST_GEN_PROGS_riscv += kvm_create_max_vcpus TEST_GEN_PROGS_riscv += kvm_page_table_test TEST_GEN_PROGS_riscv += set_memory_region_test diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c new file mode 100644 index 000000000000..267e01d057fb --- /dev/null +++ b/tools/testing/selftests/kvm/guest_print_test.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * A test for GUEST_PRINTF + * + * Copyright 2022, Google, Inc. and/or its affiliates. + */ +#define USE_GUEST_ASSERT_PRINTF 1 + +#include +#include +#include +#include +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +struct guest_vals { + uint64_t a; + uint64_t b; + uint64_t type; +}; + +static struct guest_vals vals; + +/* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. 
*/
+#define TYPE_LIST					\
+TYPE(test_type_i64,  I64,  "%ld",   int64_t)		\
+TYPE(test_type_u64,  U64u, "%lu",   uint64_t)		\
+TYPE(test_type_x64,  U64x, "0x%lx", uint64_t)		\
+TYPE(test_type_X64,  U64X, "0x%lX", uint64_t)		\
+TYPE(test_type_u32,  U32u, "%u",    uint32_t)		\
+TYPE(test_type_x32,  U32x, "0x%x",  uint32_t)		\
+TYPE(test_type_X32,  U32X, "0x%X",  uint32_t)		\
+TYPE(test_type_int,  INT,  "%d",    int)		\
+TYPE(test_type_char, CHAR, "%c",    char)		\
+TYPE(test_type_str,  STR,  "'%s'",  const char *)	\
+TYPE(test_type_ptr,  PTR,  "%p",    uintptr_t)
+
+enum args_type {
+#define TYPE(fn, ext, fmt_t, T) TYPE_##ext,
+	TYPE_LIST
+#undef TYPE
+};
+
+static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
+		     const char *expected_assert);
+
+#define BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T)		     \
+const char *PRINTF_FMT_##ext = "Got params a = " fmt_t " and b = " fmt_t;    \
+const char *ASSERT_FMT_##ext = "Expected " fmt_t ", got " fmt_t " instead";  \
+static void fn(struct kvm_vcpu *vcpu, T a, T b)			     \
+{									     \
+	char expected_printf[UCALL_BUFFER_LEN];				     \
+	char expected_assert[UCALL_BUFFER_LEN];				     \
+									     \
+	snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \
+	snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \
+	vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext };  \
+	sync_global_to_guest(vcpu->vm, vals);				     \
+	run_test(vcpu, expected_printf, expected_assert);		     \
+}
+
+#define TYPE(fn, ext, fmt_t, T)						\
+	BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T)
+	TYPE_LIST
+#undef TYPE
+
+static void guest_code(void)
+{
+	while (1) {
+		switch (vals.type) {
+#define TYPE(fn, ext, fmt_t, T)						\
+		case TYPE_##ext:					\
+			GUEST_PRINTF(PRINTF_FMT_##ext, vals.a, vals.b);	\
+			__GUEST_ASSERT(vals.a == vals.b,		\
+				       ASSERT_FMT_##ext, vals.a, vals.b); \
+			break;
+		TYPE_LIST
+#undef TYPE
+		default:
+			GUEST_SYNC(vals.type);
+		}
+
+		GUEST_DONE();
+	}
+}
+
+/*
+ * Unfortunately this gets a little messy because 'assert_msg' doesn't
+ * just contain the matching string, it also contains additional assert
+ * info.  Fortunately the part that matches should be at the very end of
+ * 'assert_msg'.
+ */
+static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
+{
+	int len_str = strlen(assert_msg);
+	int len_substr = strlen(expected_assert_msg);
+	int offset = len_str - len_substr;
+
+	TEST_ASSERT(len_substr <= len_str,
+		    "Expected '%s' to be a substring of '%s'\n",
+		    expected_assert_msg, assert_msg);
+
+	TEST_ASSERT(strcmp(&assert_msg[offset], expected_assert_msg) == 0,
+		    "Unexpected mismatch. Expected: '%s', got: '%s'",
+		    expected_assert_msg, &assert_msg[offset]);
+}
+
+static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
+		     const char *expected_assert)
+{
+	struct kvm_run *run = vcpu->run;
+	struct ucall uc;
+
+	while (1) {
+		vcpu_run(vcpu);
+
+		TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
+			    "Unexpected exit reason: %u (%s),\n",
+			    run->exit_reason, exit_reason_str(run->exit_reason));
+
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_SYNC:
+			TEST_FAIL("Unknown 'args_type' = %lu", uc.args[1]);
+			break;
+		case UCALL_PRINTF:
+			TEST_ASSERT(strcmp(uc.buffer, expected_printf) == 0,
+				    "Unexpected mismatch.
Expected: '%s', got: '%s'", + expected_printf, uc.buffer); + break; + case UCALL_ABORT: + ucall_abort(uc.buffer, expected_assert); + break; + case UCALL_DONE: + return; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + } +} + +static void guest_code_limits(void) +{ + char test_str[UCALL_BUFFER_LEN + 10]; + + memset(test_str, 'a', sizeof(test_str)); + test_str[sizeof(test_str) - 1] = 0; + + GUEST_PRINTF("%s", test_str); +} + +static void test_limits(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_run *run; + struct kvm_vm *vm; + struct ucall uc; + + vm = vm_create_with_one_vcpu(&vcpu, guest_code_limits); + run = vcpu->run; + vcpu_run(vcpu); + + TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON, + "Unexpected exit reason: %u (%s),\n", + run->exit_reason, exit_reason_str(run->exit_reason)); + + TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_ABORT, + "Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)\n", + uc.cmd, UCALL_ABORT); + + kvm_vm_free(vm); +} + +int main(int argc, char *argv[]) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + + test_type_i64(vcpu, -1, -1); + test_type_i64(vcpu, -1, 1); + test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef); + test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee); + + test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef); + test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee); + test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef); + test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee); + test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef); + test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee); + + test_type_u32(vcpu, 0x90abcdef, 0x90abcdef); + test_type_u32(vcpu, 0x90abcdef, 0x90abcdee); + test_type_x32(vcpu, 0x90abcdef, 0x90abcdef); + test_type_x32(vcpu, 0x90abcdef, 0x90abcdee); + test_type_X32(vcpu, 0x90abcdef, 0x90abcdef); + test_type_X32(vcpu, 0x90abcdef, 0x90abcdee); + + test_type_int(vcpu, -1, -1); + test_type_int(vcpu, -1, 1); + test_type_int(vcpu, 1, 1); + + test_type_char(vcpu, 'a', 'a'); + test_type_char(vcpu, 'a', 'A'); + test_type_char(vcpu, 'a', 'b'); + + test_type_str(vcpu, "foo", "foo"); + test_type_str(vcpu, "foo", "bar"); + + test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdef); + test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdee); + + kvm_vm_free(vm); + + test_limits(); + + return 0; +} -- cgit From db44e1c871bcf6228b9447aada421088e036692a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:19 -0700 Subject: KVM: selftests: Convert aarch_timer to printf style GUEST_ASSERT Convert ARM's aarch_timer test to use printf-based GUEST_ASSERT(). To maintain existing functionality, manually print the host information, e.g. stage and iteration, to stderr prior to reporting the guest assert. Link: https://lore.kernel.org/r/20230729003643.1053367-11-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/aarch64/arch_timer.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index 8ef370924a02..b53bcf126e6a 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -19,6 +19,7 @@ * * Copyright (c) 2021, Google LLC. 
*/ +#define USE_GUEST_ASSERT_PRINTF 1 #define _GNU_SOURCE @@ -155,11 +156,13 @@ static void guest_validate_irq(unsigned int intid, xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt); /* Make sure we are dealing with the correct timer IRQ */ - GUEST_ASSERT_2(intid == timer_irq, intid, timer_irq); + GUEST_ASSERT_EQ(intid, timer_irq); /* Basic 'timer condition met' check */ - GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us); - GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl); + __GUEST_ASSERT(xcnt >= cval, + "xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx", + xcnt, cval, xcnt_diff_us); + __GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt); WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); } @@ -192,8 +195,7 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data, TIMER_TEST_ERR_MARGIN_US); irq_iter = READ_ONCE(shared_data->nr_iter); - GUEST_ASSERT_2(config_iter + 1 == irq_iter, - config_iter + 1, irq_iter); + GUEST_ASSERT_EQ(config_iter + 1, irq_iter); } } @@ -243,13 +245,9 @@ static void *test_vcpu_run(void *arg) break; case UCALL_ABORT: sync_global_from_guest(vm, *shared_data); - REPORT_GUEST_ASSERT_N(uc, "values: %lu, %lu; %lu, vcpu %u; stage; %u; iter: %u", - GUEST_ASSERT_ARG(uc, 0), - GUEST_ASSERT_ARG(uc, 1), - GUEST_ASSERT_ARG(uc, 2), - vcpu_idx, - shared_data->guest_stage, - shared_data->nr_iter); + fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n", + vcpu_idx, shared_data->guest_stage, shared_data->nr_iter); + REPORT_GUEST_ASSERT(uc); break; default: TEST_FAIL("Unexpected guest exit\n"); -- cgit From bac9aeecc387a1436567b6e74f8257f7e8c4d194 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:20 -0700 Subject: KVM: selftests: Convert debug-exceptions to printf style GUEST_ASSERT Convert ARM's debug exceptions test to use printf-based GUEST_ASSERT(). Opportunistically Use GUEST_ASSERT_EQ() in guest_code_ss() so that the expected vs. actual values get printed out. Link: https://lore.kernel.org/r/20230729003643.1053367-12-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/aarch64/debug-exceptions.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index 637be796086f..fdd5b05e1b0e 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#define USE_GUEST_ASSERT_PRINTF 1 + #include #include #include @@ -365,7 +367,7 @@ static void guest_wp_handler(struct ex_regs *regs) static void guest_ss_handler(struct ex_regs *regs) { - GUEST_ASSERT_1(ss_idx < 4, ss_idx); + __GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%u'", ss_idx); ss_addr[ss_idx++] = regs->pc; regs->pstate |= SPSR_SS; } @@ -410,8 +412,8 @@ static void guest_code_ss(int test_cnt) /* Userspace disables Single Step when the end is nigh. 
*/ asm volatile("iter_ss_end:\n"); - GUEST_ASSERT(bvr == w_bvr); - GUEST_ASSERT(wvr == w_wvr); + GUEST_ASSERT_EQ(bvr, w_bvr); + GUEST_ASSERT_EQ(wvr, w_wvr); } GUEST_DONE(); } @@ -450,7 +452,7 @@ static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bp vcpu_run(vcpu); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); + REPORT_GUEST_ASSERT(uc); break; case UCALL_DONE: goto done; -- cgit From af5b41b97f1cc9d766f700197955965872aba4a3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:21 -0700 Subject: KVM: selftests: Convert ARM's hypercalls test to printf style GUEST_ASSERT Convert ARM's hypercalls test to use printf-based GUEST_ASSERT(). Opportunistically use GUEST_FAIL() to complain about an unexpected stage. Link: https://lore.kernel.org/r/20230729003643.1053367-13-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/aarch64/hypercalls.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c index bef1499fb465..94555a7d3c7e 100644 --- a/tools/testing/selftests/kvm/aarch64/hypercalls.c +++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c @@ -8,6 +8,7 @@ * hypercalls are properly masked or unmasked to the guest when disabled or * enabled from the KVM userspace, respectively. */ +#define USE_GUEST_ASSERT_PRINTF 1 #include #include @@ -105,15 +106,17 @@ static void guest_test_hvc(const struct test_hvc_info *hc_info) switch (stage) { case TEST_STAGE_HVC_IFACE_FEAT_DISABLED: case TEST_STAGE_HVC_IFACE_FALSE_INFO: - GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED, - res.a0, hc_info->func_id, hc_info->arg1); + __GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED, + "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u", + res.a0, hc_info->func_id, hc_info->arg1, stage); break; case TEST_STAGE_HVC_IFACE_FEAT_ENABLED: - GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED, - res.a0, hc_info->func_id, hc_info->arg1); + __GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED, + "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u", + res.a0, hc_info->func_id, hc_info->arg1, stage); break; default: - GUEST_ASSERT_1(0, stage); + GUEST_FAIL("Unexpected stage = %u", stage); } } } @@ -132,7 +135,7 @@ static void guest_code(void) guest_test_hvc(false_hvc_info); break; default: - GUEST_ASSERT_1(0, stage); + GUEST_FAIL("Unexpected stage = %u", stage); } GUEST_SYNC(stage); @@ -290,10 +293,7 @@ static void test_run(void) guest_done = true; break; case UCALL_ABORT: - REPORT_GUEST_ASSERT_N(uc, "values: 0x%lx, 0x%lx; 0x%lx, stage: %u", - GUEST_ASSERT_ARG(uc, 0), - GUEST_ASSERT_ARG(uc, 1), - GUEST_ASSERT_ARG(uc, 2), stage); + REPORT_GUEST_ASSERT(uc); break; default: TEST_FAIL("Unexpected guest exit\n"); -- cgit From df27f6b45454c02919b84dfe3ffad3f665108d23 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:22 -0700 Subject: KVM: selftests: Convert ARM's page fault test to printf style GUEST_ASSERT Use GUEST_FAIL() in ARM's page fault test to report unexpected faults. 
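GUEST_FAIL() is, roughly speaking, an unconditional printf-style assert
(think __GUEST_ASSERT(false, fmt, ...)), so a handler that should never
fire can report the interesting state directly, e.g.:

	static void no_dabt_handler(struct ex_regs *regs)
	{
		GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx",
			   read_sysreg(far_el1));
	}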
Link: https://lore.kernel.org/r/20230729003643.1053367-14-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/aarch64/page_fault_test.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index e5bb8767d2cb..0b0dd90feae5 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -7,6 +7,7 @@ * hugetlbfs with a hole). It checks that the expected handling method is * called (e.g., uffd faults with the right address and write/read flag). */ +#define USE_GUEST_ASSERT_PRINTF 1 #define _GNU_SOURCE #include @@ -293,12 +294,12 @@ static void guest_code(struct test_desc *test) static void no_dabt_handler(struct ex_regs *regs) { - GUEST_ASSERT_1(false, read_sysreg(far_el1)); + GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1)); } static void no_iabt_handler(struct ex_regs *regs) { - GUEST_ASSERT_1(false, regs->pc); + GUEST_FAIL("Unexpected iabt, pc = 0x%lx", regs->pc); } static struct uffd_args { @@ -679,7 +680,7 @@ static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu, } break; case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); + REPORT_GUEST_ASSERT(uc); break; case UCALL_DONE: goto done; -- cgit From d0ad3bacc523cd780d573c890ebf3b14cec5df22 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:23 -0700 Subject: KVM: selftests: Convert ARM's vGIC IRQ test to printf style GUEST_ASSERT Use printf-based guest assert reporting in ARM's vGIC IRQ test. Note, this is not as innocuous as it looks! The printf-based version of GUEST_ASSERT_EQ() ensures the expressions are evaluated only once, whereas the old version did not! Link: https://lore.kernel.org/r/20230729003643.1053367-15-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/aarch64/vgic_irq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 90d854e0fcff..67da33aa6d17 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -7,6 +7,7 @@ * host to inject a specific intid via a GUEST_SYNC call, and then checks that * it received it. */ +#define USE_GUEST_ASSERT_PRINTF 1 #include #include @@ -781,7 +782,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) run_guest_cmd(vcpu, gic_fd, &inject_args, &args); break; case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); + REPORT_GUEST_ASSERT(uc); break; case UCALL_DONE: goto done; -- cgit From c55a475d5fc42931992b667aa16bed2db6058d06 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:24 -0700 Subject: KVM: selftests: Convert the memslot performance test to printf guest asserts Use the printf-based GUEST_ASSERT_EQ() in the memslot perf test instead of an half-baked open code version. 
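For example, where the old scheme required the guest to pass the observed
value by hand and the host to know the matching format string:

	GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
	...
	REPORT_GUEST_ASSERT_1(uc, "val = %lu");

the printf-based variant needs only:

	GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2);
	...
	REPORT_GUEST_ASSERT(uc);

and reports both sides of the comparison on failure.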
Link: https://lore.kernel.org/r/20230729003643.1053367-16-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/memslot_perf_test.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index 4210cd21d159..55f1bc70e571 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -6,6 +6,8 @@ * * Basic guest setup / host vCPU thread code lifted from set_memory_region_test. */ +#define USE_GUEST_ASSERT_PRINTF 1 + #include #include #include @@ -157,7 +159,7 @@ static void *vcpu_worker(void *__data) goto done; break; case UCALL_ABORT: - REPORT_GUEST_ASSERT_1(uc, "val = %lu"); + REPORT_GUEST_ASSERT(uc); break; case UCALL_DONE: goto done; @@ -560,7 +562,7 @@ static void guest_code_test_memslot_rw(void) ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) { uint64_t val = *(uint64_t *)ptr; - GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val); + GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2); *(uint64_t *)ptr = 0; } -- cgit From 428c76c769fa652e2186de464c74e34daede489f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:25 -0700 Subject: KVM: selftests: Convert s390's memop test to printf style GUEST_ASSERT Convert s390's memop test to printf-based GUEST_ASSERT, and opportunistically use GUEST_FAIL() to report invalid sizes. Link: https://lore.kernel.org/r/20230729003643.1053367-17-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/s390x/memop.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index de73dc030905..a49173907cec 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -4,6 +4,7 @@ * * Copyright (C) 2019, Red Hat, Inc. */ +#define USE_GUEST_ASSERT_PRINTF 1 #include #include @@ -279,7 +280,7 @@ enum stage { vcpu_run(__vcpu); \ get_ucall(__vcpu, &uc); \ if (uc.cmd == UCALL_ABORT) { \ - REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \ + REPORT_GUEST_ASSERT(uc); \ } \ TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \ TEST_ASSERT_EQ(uc.args[1], __stage); \ @@ -469,7 +470,7 @@ static __uint128_t cut_to_size(int size, __uint128_t val) case 16: return val; } - GUEST_ASSERT_1(false, "Invalid size"); + GUEST_FAIL("Invalid size = %u", size); return 0; } @@ -598,7 +599,7 @@ static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t return ret; } } - GUEST_ASSERT_1(false, "Invalid size"); + GUEST_FAIL("Invalid size = %u", size); return 0; } -- cgit From 5f82bbab84adcb8990f17503393628a210086b2c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:26 -0700 Subject: KVM: selftests: Convert s390's tprot test to printf style GUEST_ASSERT Convert s390's tprot test to printf-based GUEST_ASSERT. Link: https://lore.kernel.org/r/20230729003643.1053367-18-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/s390x/tprot.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c index 40d3ea16c052..c12c6824d963 100644 --- a/tools/testing/selftests/kvm/s390x/tprot.c +++ b/tools/testing/selftests/kvm/s390x/tprot.c @@ -4,6 +4,7 @@ * * Copyright IBM Corp. 
2021 */ +#define USE_GUEST_ASSERT_PRINTF 1 #include #include "test_util.h" @@ -156,7 +157,9 @@ static enum stage perform_next_stage(int *i, bool mapped_0) !mapped_0; if (!skip) { result = test_protection(tests[*i].addr, tests[*i].key); - GUEST_ASSERT_2(result == tests[*i].expected, *i, result); + __GUEST_ASSERT(result == tests[*i].expected, + "Wanted %u, got %u, for i = %u", + tests[*i].expected, result, *i); } } return stage; @@ -190,7 +193,7 @@ static void guest_code(void) vcpu_run(__vcpu); \ get_ucall(__vcpu, &uc); \ if (uc.cmd == UCALL_ABORT) \ - REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \ + REPORT_GUEST_ASSERT(uc); \ TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \ TEST_ASSERT_EQ(uc.args[1], __stage); \ }) -- cgit From 9291c9cef5b52b64a20f92ea63341d903f0ae328 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:27 -0700 Subject: KVM: selftests: Convert set_memory_region_test to printf-based GUEST_ASSERT Convert set_memory_region_test to print-based GUEST_ASSERT, using a combo of newfangled macros to report (hopefully) useful information. Link: https://lore.kernel.org/r/20230729003643.1053367-19-seanjc@google.com Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/set_memory_region_test.c | 23 +++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c index a849ce23ca97..dd8f4bac9df8 100644 --- a/tools/testing/selftests/kvm/set_memory_region_test.c +++ b/tools/testing/selftests/kvm/set_memory_region_test.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#define USE_GUEST_ASSERT_PRINTF 1 + #define _GNU_SOURCE /* for program_invocation_short_name */ #include #include @@ -88,7 +90,7 @@ static void *vcpu_worker(void *data) } if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT) - REPORT_GUEST_ASSERT_1(uc, "val = %lu"); + REPORT_GUEST_ASSERT(uc); return NULL; } @@ -156,19 +158,22 @@ static void guest_code_move_memory_region(void) * window where the memslot is invalid is usually quite small. */ val = guest_spin_on_val(0); - GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val); + __GUEST_ASSERT(val == 1 || val == MMIO_VAL, + "Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val); /* Spin until the misaligning memory region move completes. */ val = guest_spin_on_val(MMIO_VAL); - GUEST_ASSERT_1(val == 1 || val == 0, val); + __GUEST_ASSERT(val == 1 || val == 0, + "Expected '0' or '1' (no MMIO), got '%llx'", val); /* Spin until the memory region starts to get re-aligned. */ val = guest_spin_on_val(0); - GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val); + __GUEST_ASSERT(val == 1 || val == MMIO_VAL, + "Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val); /* Spin until the re-aligning memory region move completes. */ val = guest_spin_on_val(MMIO_VAL); - GUEST_ASSERT_1(val == 1, val); + GUEST_ASSERT_EQ(val, 1); GUEST_DONE(); } @@ -224,15 +229,15 @@ static void guest_code_delete_memory_region(void) /* Spin until the memory region is deleted. */ val = guest_spin_on_val(0); - GUEST_ASSERT_1(val == MMIO_VAL, val); + GUEST_ASSERT_EQ(val, MMIO_VAL); /* Spin until the memory region is recreated. */ val = guest_spin_on_val(MMIO_VAL); - GUEST_ASSERT_1(val == 0, val); + GUEST_ASSERT_EQ(val, 0); /* Spin until the memory region is deleted. 
*/ val = guest_spin_on_val(0); - GUEST_ASSERT_1(val == MMIO_VAL, val); + GUEST_ASSERT_EQ(val, MMIO_VAL); asm("1:\n\t" ".pushsection .rodata\n\t" @@ -249,7 +254,7 @@ static void guest_code_delete_memory_region(void) "final_rip_end: .quad 1b\n\t" ".popsection"); - GUEST_ASSERT_1(0, 0); + GUEST_ASSERT(0); } static void test_delete_memory_region(void) -- cgit From 3d9bd831175e898284eb92dadac7049cd055bad6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:28 -0700 Subject: KVM: selftests: Convert steal_time test to printf style GUEST_ASSERT Convert the steal_time test to use printf-based GUEST_ASERT. Opportunistically use GUEST_ASSERT_EQ() and GUEST_ASSERT_NE() so that the test spits out debug information on failure. Link: https://lore.kernel.org/r/20230729003643.1053367-20-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/steal_time.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index c87f38712073..8649c8545882 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -4,6 +4,8 @@ * * Copyright (C) 2020, Red Hat, Inc. */ +#define USE_GUEST_ASSERT_PRINTF 1 + #define _GNU_SOURCE #include #include @@ -31,8 +33,8 @@ static uint64_t guest_stolen_time[NR_VCPUS]; static void check_status(struct kvm_steal_time *st) { GUEST_ASSERT(!(READ_ONCE(st->version) & 1)); - GUEST_ASSERT(READ_ONCE(st->flags) == 0); - GUEST_ASSERT(READ_ONCE(st->preempted) == 0); + GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0); + GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0); } static void guest_code(int cpu) @@ -40,7 +42,7 @@ static void guest_code(int cpu) struct kvm_steal_time *st = st_gva[cpu]; uint32_t version; - GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED)); + GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED)); memset(st, 0, sizeof(*st)); GUEST_SYNC(0); @@ -122,8 +124,8 @@ static int64_t smccc(uint32_t func, uint64_t arg) static void check_status(struct st_time *st) { - GUEST_ASSERT(READ_ONCE(st->rev) == 0); - GUEST_ASSERT(READ_ONCE(st->attr) == 0); + GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0); + GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0); } static void guest_code(int cpu) @@ -132,15 +134,15 @@ static void guest_code(int cpu) int64_t status; status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES); - GUEST_ASSERT(status == 0); + GUEST_ASSERT_EQ(status, 0); status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES); - GUEST_ASSERT(status == 0); + GUEST_ASSERT_EQ(status, 0); status = smccc(PV_TIME_FEATURES, PV_TIME_ST); - GUEST_ASSERT(status == 0); + GUEST_ASSERT_EQ(status, 0); status = smccc(PV_TIME_ST, 0); - GUEST_ASSERT(status != -1); - GUEST_ASSERT(status == (ulong)st_gva[cpu]); + GUEST_ASSERT_NE(status, -1); + GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]); st = (struct st_time *)status; GUEST_SYNC(0); -- cgit From 06b651d250e5eea0a1c1fd31fbbc0e0b27f66592 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:29 -0700 Subject: KVM: selftests: Convert x86's CPUID test to printf style GUEST_ASSERT Convert x86's CPUID test to use printf-based GUEST_ASSERT_EQ() so that the test prints out debug information. Note, the test previously used REPORT_GUEST_ASSERT_2(), but that was pointless because none of the guest-side code passed any parameters to the assert. 
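Splitting the old compound '&&' assert into one GUEST_ASSERT_EQ() per
register has the side benefit that a failure now pinpoints exactly which
register mismatched, e.g. a bad EBX trips:

	GUEST_ASSERT_EQ(ebx, guest_cpuid->entries[i].ebx);

rather than an opaque four-way conjunction with no values.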
Link: https://lore.kernel.org/r/20230729003643.1053367-21-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/cpuid_test.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c index d3c3aa93f090..eb1b65ffc0d5 100644 --- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c +++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c @@ -4,6 +4,8 @@ * * Generic tests for KVM CPUID set/get ioctls */ +#define USE_GUEST_ASSERT_PRINTF 1 + #include #include #include @@ -35,10 +37,10 @@ static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid) guest_cpuid->entries[i].index, &eax, &ebx, &ecx, &edx); - GUEST_ASSERT(eax == guest_cpuid->entries[i].eax && - ebx == guest_cpuid->entries[i].ebx && - ecx == guest_cpuid->entries[i].ecx && - edx == guest_cpuid->entries[i].edx); + GUEST_ASSERT_EQ(eax, guest_cpuid->entries[i].eax); + GUEST_ASSERT_EQ(ebx, guest_cpuid->entries[i].ebx); + GUEST_ASSERT_EQ(ecx, guest_cpuid->entries[i].ecx); + GUEST_ASSERT_EQ(edx, guest_cpuid->entries[i].edx); } } @@ -51,7 +53,7 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid) GUEST_SYNC(2); - GUEST_ASSERT(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF) == 0x40000001); + GUEST_ASSERT_EQ(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF), 0x40000001); GUEST_DONE(); } @@ -116,7 +118,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage) case UCALL_DONE: return; case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); + REPORT_GUEST_ASSERT(uc); default: TEST_ASSERT(false, "Unexpected exit: %s", exit_reason_str(vcpu->run->exit_reason)); -- cgit From 82cb0ed66d4e5207c53b31a29d9f71af83190ee6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:30 -0700 Subject: KVM: selftests: Convert the Hyper-V extended hypercalls test to printf asserts Convert x86's Hyper-V extended hypercalls test to use printf-based GUEST_ASSERT_EQ(). Link: https://lore.kernel.org/r/20230729003643.1053367-22-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c index 73af44d2167f..0107d54a1a08 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c @@ -8,6 +8,7 @@ * Copyright 2022 Google LLC * Author: Vipin Sharma */ +#define USE_GUEST_ASSERT_PRINTF 1 #include "kvm_util.h" #include "processor.h" @@ -84,7 +85,7 @@ int main(void) switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld"); + REPORT_GUEST_ASSERT(uc); break; case UCALL_DONE: break; -- cgit From 8d1d3ce604e54c43228439291db53543da156110 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:31 -0700 Subject: KVM: selftests: Convert the Hyper-V feature test to printf style GUEST_ASSERT Convert x86's Hyper-V feature test to use print-based guest asserts. Opportunistically use the EQ and NE variants in a few places to capture additional information. 
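__GUEST_ASSERT() takes a caller-supplied format string plus arguments,
e.g. (illustrative):

	__GUEST_ASSERT(vector == GP_VECTOR,
		       "Expected #GP, got vector '0x%x'", vector);

which lets the guest report *why* an MSR access or hypercall result was
unexpected, not merely that it was.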
Link: https://lore.kernel.org/r/20230729003643.1053367-23-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 .../testing/selftests/kvm/x86_64/hyperv_features.c | 31 +++++++++++++++-------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 78606de9385d..41a6beff78c4 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -4,6 +4,8 @@
  *
  * Tests for Hyper-V features enablement
  */
+#define USE_GUEST_ASSERT_PRINTF 1
+
 #include
 #include
 #include
@@ -53,16 +55,21 @@ static void guest_msr(struct msr_data *msr)
 	vector = rdmsr_safe(msr->idx, &msr_val);
 
 	if (msr->fault_expected)
-		GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR);
+		__GUEST_ASSERT(vector == GP_VECTOR,
+			       "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
+			       msr->write ? "WR" : "RD", msr->idx, vector);
 	else
-		GUEST_ASSERT_3(!vector, msr->idx, vector, 0);
+		__GUEST_ASSERT(!vector,
+			       "Expected success on %sMSR(0x%x), got vector '0x%x'",
+			       msr->write ? "WR" : "RD", msr->idx, vector);
 
 	if (vector || is_write_only_msr(msr->idx))
 		goto done;
 
 	if (msr->write)
-		GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
-			       msr_val, msr->write_val);
+		__GUEST_ASSERT(msr_val == msr->write_val,
+			       "WRMSR(0x%x) to '0x%llx', RDMSR read '0x%llx'",
+			       msr->idx, msr->write_val, msr_val);
 
 	/* Invariant TSC bit appears when TSC invariant control MSR is written to */
 	if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
@@ -82,7 +89,7 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
 	u64 res, input, output;
 	uint8_t vector;
 
-	GUEST_ASSERT(hcall->control);
+	GUEST_ASSERT_NE(hcall->control, 0);
 
 	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
 	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
@@ -96,10 +103,14 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
 	vector = __hyperv_hypercall(hcall->control, input, output, &res);
 	if (hcall->ud_expected) {
-		GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
+		__GUEST_ASSERT(vector == UD_VECTOR,
+			       "Expected #UD for control '%u', got vector '0x%x'",
+			       hcall->control, vector);
 	} else {
-		GUEST_ASSERT_2(!vector, hcall->control, vector);
-		GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
+		__GUEST_ASSERT(!vector,
+			       "Expected no exception for control '%u', got vector '0x%x'",
+			       hcall->control, vector);
+		GUEST_ASSERT_EQ(res, hcall->expect);
 	}
 
 	GUEST_DONE();
@@ -495,7 +506,7 @@ static void guest_test_msrs_access(void)
 
 		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
-			REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx");
+			REPORT_GUEST_ASSERT(uc);
 			return;
 		case UCALL_DONE:
 			break;
@@ -665,7 +676,7 @@ static void guest_test_hcalls_access(void)
 
 		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
-			REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
+			REPORT_GUEST_ASSERT(uc);
 			return;
 		case UCALL_DONE:
 			break;
-- cgit

From bf6c760b9df39ebc83245b37ef09ee390ba05e9f Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 17:36:32 -0700
Subject: KVM: selftests: Convert x86's KVM paravirt test to printf style
 GUEST_ASSERT

Convert x86's KVM paravirtualization test to use the printf-based
GUEST_ASSERT_EQ().
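E.g. a check that previously buried the expected value in the condition,
with no output on failure:

	GUEST_ASSERT(r == -KVM_ENOSYS);

becomes self-describing:

	GUEST_ASSERT_EQ(r, -KVM_ENOSYS);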
Link: https://lore.kernel.org/r/20230729003643.1053367-24-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/x86_64/kvm_pv_test.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
index f774a9e62858..1c28b77ff3cd 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
@@ -4,6 +4,8 @@
  *
  * Tests for KVM paravirtual feature disablement
  */
+#define USE_GUEST_ASSERT_PRINTF 1
+
 #include
 #include
 #include
@@ -46,10 +48,10 @@ static void test_msr(struct msr_data *msr)
 	PR_MSR(msr);
 
 	vector = rdmsr_safe(msr->idx, &ignored);
-	GUEST_ASSERT_1(vector == GP_VECTOR, vector);
+	GUEST_ASSERT_EQ(vector, GP_VECTOR);
 
 	vector = wrmsr_safe(msr->idx, 0);
-	GUEST_ASSERT_1(vector == GP_VECTOR, vector);
+	GUEST_ASSERT_EQ(vector, GP_VECTOR);
 }
 
 struct hcall_data {
@@ -77,7 +79,7 @@ static void test_hcall(struct hcall_data *hc)
 	PR_HCALL(hc);
 
 	r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
-	GUEST_ASSERT(r == -KVM_ENOSYS);
+	GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
 }
 
 static void guest_main(void)
@@ -125,7 +127,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 			pr_hcall(&uc);
 			break;
 		case UCALL_ABORT:
-			REPORT_GUEST_ASSERT_1(uc, "vector = %lu");
+			REPORT_GUEST_ASSERT(uc);
 			return;
 		case UCALL_DONE:
 			return;
-- cgit

From 0f52e4aaa61400ce68e122b678811777734421ff Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 17:36:33 -0700
Subject: KVM: selftests: Convert the MONITOR/MWAIT test to use printf guest
 asserts

Convert x86's MONITOR/MWAIT test to use printf-based guest asserts.  Add
a macro to handle reporting failures to reduce the amount of copy+paste
needed for MONITOR vs. MWAIT.

Link: https://lore.kernel.org/r/20230729003643.1053367-25-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 .../selftests/kvm/x86_64/monitor_mwait_test.c      | 37 +++++++++++++---------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
index 72812644d7f5..960fecab3742 100644
--- a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
+++ b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+#define USE_GUEST_ASSERT_PRINTF 1
+
 #include
 #include
 #include
@@ -16,14 +18,25 @@ enum monitor_mwait_testcases {
 	MWAIT_DISABLED = BIT(2),
 };
 
+/*
+ * If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD, in all
+ * other scenarios KVM should emulate them as nops.
+ */
+#define GUEST_ASSERT_MONITOR_MWAIT(insn, testcase, vector)		\
+do {									\
+	bool fault_wanted = ((testcase) & MWAIT_QUIRK_DISABLED) &&	\
+			    ((testcase) & MWAIT_DISABLED);		\
+									\
+	if (fault_wanted)						\
+		__GUEST_ASSERT((vector) == UD_VECTOR,			\
+			       "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", (testcase), (vector)); \
+	else								\
+		__GUEST_ASSERT(!(vector),				\
+			       "Expected success on " insn " for testcase '0x%x', got '0x%x'", (testcase), (vector)); \
+} while (0)
+
 static void guest_monitor_wait(int testcase)
 {
-	/*
-	 * If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD,
-	 * in all other scenarios KVM should emulate them as nops.
-	 */
-	bool fault_wanted = (testcase & MWAIT_QUIRK_DISABLED) &&
-			    (testcase & MWAIT_DISABLED);
 	u8 vector;
 
 	GUEST_SYNC(testcase);
@@ -33,16 +46,10 @@ static void guest_monitor_wait(int testcase)
 	 * intercept checks, so the inputs for MONITOR and MWAIT must be valid.
*/ vector = kvm_asm_safe("monitor", "a"(guest_monitor_wait), "c"(0), "d"(0)); - if (fault_wanted) - GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector); - else - GUEST_ASSERT_2(!vector, testcase, vector); + GUEST_ASSERT_MONITOR_MWAIT("MONITOR", testcase, vector); vector = kvm_asm_safe("mwait", "a"(guest_monitor_wait), "c"(0), "d"(0)); - if (fault_wanted) - GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector); - else - GUEST_ASSERT_2(!vector, testcase, vector); + GUEST_ASSERT_MONITOR_MWAIT("MWAIT", testcase, vector); } static void guest_code(void) @@ -85,7 +92,7 @@ int main(int argc, char *argv[]) testcase = uc.args[1]; break; case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "testcase = %lx, vector = %ld"); + REPORT_GUEST_ASSERT(uc); goto done; case UCALL_DONE: goto done; -- cgit From b13a307ce3c6435c288017d7edf1c6e2827470dc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:34 -0700 Subject: KVM: selftests: Convert x86's nested exceptions test to printf guest asserts Convert x86's nested exceptions test to printf-based guest asserts, and use REPORT_GUEST_ASSERT() instead of TEST_FAIL() so that output is formatted correctly. Link: https://lore.kernel.org/r/20230729003643.1053367-26-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c index 5f074a6da90c..4a29f59a76be 100644 --- a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c +++ b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#define USE_GUEST_ASSERT_PRINTF 1 + #define _GNU_SOURCE /* for program_invocation_short_name */ #include "test_util.h" @@ -180,9 +182,7 @@ static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector) "Expected L2 to ask for %d, L2 says it's done", vector); break; case UCALL_ABORT: - TEST_FAIL("%s at %s:%ld (0x%lx != 0x%lx)", - (const char *)uc.args[0], __FILE__, uc.args[1], - uc.args[2], uc.args[3]); + REPORT_GUEST_ASSERT(uc); break; default: TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd); -- cgit From 40b319d6b4e109d5e94ac1054d5147624e79088f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:35 -0700 Subject: KVM: selftests: Convert x86's set BSP ID test to printf style guest asserts Convert the set_boot_cpu_id test to use printf-based guest asserts, specifically the EQ and NE variants. Link: https://lore.kernel.org/r/20230729003643.1053367-27-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c index b25d7556b638..abb3f26d3ce0 100644 --- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c +++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c @@ -4,6 +4,8 @@ * * Copyright (C) 2020, Red Hat, Inc. 
*/ +#define USE_GUEST_ASSERT_PRINTF 1 + #define _GNU_SOURCE /* for program_invocation_name */ #include #include @@ -20,7 +22,7 @@ static void guest_bsp_vcpu(void *arg) { GUEST_SYNC(1); - GUEST_ASSERT(get_bsp_flag() != 0); + GUEST_ASSERT_NE(get_bsp_flag(), 0); GUEST_DONE(); } @@ -29,7 +31,7 @@ static void guest_not_bsp_vcpu(void *arg) { GUEST_SYNC(1); - GUEST_ASSERT(get_bsp_flag() == 0); + GUEST_ASSERT_EQ(get_bsp_flag(), 0); GUEST_DONE(); } @@ -65,7 +67,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu) stage); break; case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); + REPORT_GUEST_ASSERT(uc); default: TEST_ASSERT(false, "Unexpected exit: %s", exit_reason_str(vcpu->run->exit_reason)); -- cgit From a925f799428168e1a849509c0f425673b62e53ad Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:36 -0700 Subject: KVM: selftests: Convert the nSVM software interrupt test to printf guest asserts Convert x86's nested SVM software interrupt injection test to use printf- based guest asserts. Opportunistically use GUEST_ASSERT() and GUEST_FAIL() in a few locations to spit out more debug information. Link: https://lore.kernel.org/r/20230729003643.1053367-28-seanjc@google.com Signed-off-by: Sean Christopherson --- .../kvm/x86_64/svm_nested_soft_inject_test.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c index 4e2479716da6..c908412c5754 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c @@ -8,6 +8,7 @@ * Copyright (C) 2021, Red Hat, Inc. * */ +#define USE_GUEST_ASSERT_PRINTF 1 #include #include @@ -34,13 +35,12 @@ static void l2_guest_code_int(void); static void guest_int_handler(struct ex_regs *regs) { int_fired++; - GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int, - regs->rip, (unsigned long)l2_guest_code_int); + GUEST_ASSERT_EQ(regs->rip, (unsigned long)l2_guest_code_int); } static void l2_guest_code_int(void) { - GUEST_ASSERT_1(int_fired == 1, int_fired); + GUEST_ASSERT_EQ(int_fired, 1); /* * Same as the vmmcall() function, but with a ud2 sneaked after the @@ -53,7 +53,7 @@ static void l2_guest_code_int(void) : "rbx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); - GUEST_ASSERT_1(bp_fired == 1, bp_fired); + GUEST_ASSERT_EQ(bp_fired, 1); hlt(); } @@ -66,9 +66,9 @@ static void guest_nmi_handler(struct ex_regs *regs) if (nmi_stage_get() == 1) { vmmcall(); - GUEST_ASSERT(false); + GUEST_FAIL("Unexpected resume after VMMCALL"); } else { - GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get()); + GUEST_ASSERT_EQ(nmi_stage_get(), 3); GUEST_DONE(); } } @@ -104,7 +104,8 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i } run_guest(vmcb, svm->vmcb_gpa); - GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL, + __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL, + "Expected VMMCAL #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'", vmcb->control.exit_code, vmcb->control.exit_info_1, vmcb->control.exit_info_2); @@ -112,7 +113,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i clgi(); x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI); - GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get()); + GUEST_ASSERT_EQ(nmi_stage_get(), 1); nmi_stage_inc(); 
stgi(); @@ -133,7 +134,8 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i vmcb->control.next_rip = vmcb->save.rip + 2; run_guest(vmcb, svm->vmcb_gpa); - GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT, + __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT, + "Expected HLT #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'", vmcb->control.exit_code, vmcb->control.exit_info_1, vmcb->control.exit_info_2); @@ -185,7 +187,7 @@ static void run_test(bool is_nmi) switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: - REPORT_GUEST_ASSERT_3(uc, "vals = 0x%lx 0x%lx 0x%lx"); + REPORT_GUEST_ASSERT(uc); break; /* NOT REACHED */ case UCALL_DONE: -- cgit From 847ae0795514a15f2845584870750aac4fc5aaee Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:37 -0700 Subject: KVM: selftests: Convert x86's TSC MSRs test to use printf guest asserts Convert x86's TSC MSRs test, and it's liberal use of GUEST_ASSERT_EQ(), to use printf-based guest assert reporting. Link: https://lore.kernel.org/r/20230729003643.1053367-29-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c index 9265965bd2cd..cf9114f70e1c 100644 --- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c +++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c @@ -4,6 +4,8 @@ * * Copyright (C) 2020, Red Hat, Inc. */ +#define USE_GUEST_ASSERT_PRINTF 1 + #include #include #include "kvm_util.h" @@ -84,7 +86,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage) ksft_test_result_pass("stage %d passed\n", stage + 1); return; case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); + REPORT_GUEST_ASSERT(uc); default: TEST_ASSERT(false, "Unexpected exit: %s", exit_reason_str(vcpu->run->exit_reason)); -- cgit From 417bfd0c820f17f40572aca37e5134ed329b791f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:38 -0700 Subject: KVM: selftests: Convert the x86 userspace I/O test to printf guest assert Convert x86's userspace I/O test to use printf-based guest asserts. 
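As with the other conversions, the per-byte verification can now report
the failing index and value in one shot:

	__GUEST_ASSERT(buffer[i] == 0xaa,
		       "Expected '0xaa', got '0x%x' at buffer[%u]",
		       buffer[i], i);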
Link: https://lore.kernel.org/r/20230729003643.1053367-30-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/userspace_io_test.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c index 0cb51fa42773..2c5d2a18d184 100644 --- a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c +++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#define USE_GUEST_ASSERT_PRINTF 1 + #include #include #include @@ -20,8 +22,8 @@ static void guest_ins_port80(uint8_t *buffer, unsigned int count) end = (unsigned long)buffer + 8192; asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory"); - GUEST_ASSERT_1(count == 0, count); - GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end); + GUEST_ASSERT_EQ(count, 0); + GUEST_ASSERT_EQ((unsigned long)buffer, end); } static void guest_code(void) @@ -43,7 +45,9 @@ static void guest_code(void) memset(buffer, 0, sizeof(buffer)); guest_ins_port80(buffer, 8192); for (i = 0; i < 8192; i++) - GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]); + __GUEST_ASSERT(buffer[i] == 0xaa, + "Expected '0xaa', got '0x%x' at buffer[%u]", + buffer[i], i); GUEST_DONE(); } @@ -91,7 +95,7 @@ int main(int argc, char *argv[]) case UCALL_DONE: break; case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "argN+1 = 0x%lx, argN+2 = 0x%lx"); + REPORT_GUEST_ASSERT(uc); default: TEST_FAIL("Unknown ucall %lu", uc.cmd); } -- cgit From 30a6e0b4553dbf2d935d26945788d37ddb234fa7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:39 -0700 Subject: KVM: selftests: Convert VMX's PMU capabilities test to printf guest asserts Convert x86's VMX PMU capabilities test to use printf-based guest asserts. Opportunstically add a helper to do the WRMSR+assert so as to reduce the amount of copy+paste needed to spit out debug information. Link: https://lore.kernel.org/r/20230729003643.1053367-31-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 28 ++++++++++++---------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 34efd57c2b32..ba09d5a01c39 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -10,6 +10,7 @@ * and check it can be retrieved with KVM_GET_MSR, also test * the invalid LBR formats are rejected. 
*/ +#define USE_GUEST_ASSERT_PRINTF 1 #define _GNU_SOURCE /* for program_invocation_short_name */ #include @@ -52,23 +53,24 @@ static const union perf_capabilities format_caps = { .pebs_format = -1, }; +static void guest_test_perf_capabilities_gp(uint64_t val) +{ + uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); + + __GUEST_ASSERT(vector == GP_VECTOR, + "Expected #GP for value '0x%llx', got vector '0x%x'", + val, vector); +} + static void guest_code(uint64_t current_val) { - uint8_t vector; int i; - vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, current_val); - GUEST_ASSERT_2(vector == GP_VECTOR, current_val, vector); + guest_test_perf_capabilities_gp(current_val); + guest_test_perf_capabilities_gp(0); - vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, 0); - GUEST_ASSERT_2(vector == GP_VECTOR, 0, vector); - - for (i = 0; i < 64; i++) { - vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, - current_val ^ BIT_ULL(i)); - GUEST_ASSERT_2(vector == GP_VECTOR, - current_val ^ BIT_ULL(i), vector); - } + for (i = 0; i < 64; i++) + guest_test_perf_capabilities_gp(current_val ^ BIT_ULL(i)); GUEST_DONE(); } @@ -95,7 +97,7 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap) switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "val = 0x%lx, vector = %lu"); + REPORT_GUEST_ASSERT(uc); break; case UCALL_DONE: break; -- cgit From 4e15c38a1aca477d7f99b66b745e811bc95f9420 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:40 -0700 Subject: KVM: selftests: Convert x86's XCR0 test to use printf-based guest asserts Convert x86's XCR0 vs. CPUID test to use printf-based guest asserts. Link: https://lore.kernel.org/r/20230729003643.1053367-32-seanjc@google.com Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/x86_64/xcr0_cpuid_test.c | 29 ++++++++++++++-------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c index 905bd5ae4431..5e8290797720 100644 --- a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c +++ b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c @@ -4,6 +4,7 @@ * * Copyright (C) 2022, Google LLC. */ +#define USE_GUEST_ASSERT_PRINTF 1 #include #include @@ -20,13 +21,14 @@ * Assert that architectural dependency rules are satisfied, e.g. that AVX is * supported if and only if SSE is supported. 
 */
-#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
-do { \
-	uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
-	\
-	GUEST_ASSERT_3((__supported & (xfeatures)) != (xfeatures) || \
-		       __supported == ((xfeatures) | (dependencies)), \
-		       __supported, (xfeatures), (dependencies)); \
+#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
+do { \
+	uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
+	\
+	__GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \
+		       __supported == ((xfeatures) | (dependencies)), \
+		       "supported = 0x%llx, xfeatures = 0x%llx, dependencies = 0x%llx", \
+		       __supported, (xfeatures), (dependencies)); \
 } while (0)
 /*
@@ -41,7 +43,8 @@ do { \
 do { \
 	uint64_t __supported = (supported_xcr0) & (xfeatures); \
 	\
-	GUEST_ASSERT_2(!__supported || __supported == (xfeatures), \
+	__GUEST_ASSERT(!__supported || __supported == (xfeatures), \
+		       "supported = 0x%llx, xfeatures = 0x%llx", \
 		       __supported, (xfeatures)); \
 } while (0)
@@ -79,14 +82,18 @@ static void guest_code(void)
 		      XFEATURE_MASK_XTILE);
 	vector = xsetbv_safe(0, supported_xcr0);
-	GUEST_ASSERT_2(!vector, supported_xcr0, vector);
+	__GUEST_ASSERT(!vector,
+		       "Expected success on XSETBV(0x%llx), got vector '0x%x'",
+		       supported_xcr0, vector);
 	for (i = 0; i < 64; i++) {
 		if (supported_xcr0 & BIT_ULL(i))
 			continue;
 		vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
-		GUEST_ASSERT_3(vector == GP_VECTOR, supported_xcr0, vector, BIT_ULL(i));
+		__GUEST_ASSERT(vector == GP_VECTOR,
+			       "Expected #GP on XSETBV(0x%llx), supported XCR0 = %llx, got vector '0x%x'",
+			       BIT_ULL(i), supported_xcr0, vector);
 	}
 	GUEST_DONE();
@@ -117,7 +124,7 @@ int main(int argc, char *argv[])
 		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
-			REPORT_GUEST_ASSERT_3(uc, "0x%lx 0x%lx 0x%lx");
+			REPORT_GUEST_ASSERT(uc);
 			break;
 		case UCALL_DONE:
 			goto done;
-- cgit

From 7ce7f8e754186f71ba7daffbc99543961a613bba Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 17:36:41 -0700
Subject: KVM: selftests: Rip out old, param-based guest assert macros

Drop the param-based guest assert macros and enable the printf versions
for all selftests.

Note!  This change can affect tests even if they don't directly use guest
asserts!  E.g. via library code, or due to the compiler making different
optimization decisions.
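
For illustration, a typical conversion (the variable names below are
placeholders, not taken from any one test) goes from raw, position-based
arguments to a self-describing message:

	/* Before: raw values, decoded by hand against the format string
	 * that the host passes to REPORT_GUEST_ASSERT_2().
	 */
	GUEST_ASSERT_2(val == expected, val, expected);

	/* After: the guest formats the message itself. */
	__GUEST_ASSERT(val == expected,
		       "Expected '0x%lx', got '0x%lx'", expected, val);
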
Link: https://lore.kernel.org/r/20230729003643.1053367-33-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/aarch64/arch_timer.c | 2 - .../selftests/kvm/aarch64/debug-exceptions.c | 2 - tools/testing/selftests/kvm/aarch64/hypercalls.c | 2 - .../selftests/kvm/aarch64/page_fault_test.c | 2 - tools/testing/selftests/kvm/aarch64/vgic_irq.c | 2 - tools/testing/selftests/kvm/guest_print_test.c | 2 - tools/testing/selftests/kvm/include/ucall_common.h | 71 ---------------------- tools/testing/selftests/kvm/memslot_perf_test.c | 2 - tools/testing/selftests/kvm/s390x/memop.c | 2 - tools/testing/selftests/kvm/s390x/tprot.c | 2 - .../testing/selftests/kvm/set_memory_region_test.c | 2 - tools/testing/selftests/kvm/steal_time.c | 2 - tools/testing/selftests/kvm/x86_64/cpuid_test.c | 2 - .../kvm/x86_64/hyperv_extended_hypercalls.c | 2 - .../testing/selftests/kvm/x86_64/hyperv_features.c | 2 - tools/testing/selftests/kvm/x86_64/kvm_pv_test.c | 2 - .../selftests/kvm/x86_64/monitor_mwait_test.c | 2 - .../selftests/kvm/x86_64/nested_exceptions_test.c | 2 - .../testing/selftests/kvm/x86_64/set_boot_cpu_id.c | 2 - .../kvm/x86_64/svm_nested_soft_inject_test.c | 2 - tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c | 2 - .../selftests/kvm/x86_64/userspace_io_test.c | 2 - .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 2 - .../testing/selftests/kvm/x86_64/xcr0_cpuid_test.c | 2 - 24 files changed, 117 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index b53bcf126e6a..274b8465b42a 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -19,8 +19,6 @@ * * Copyright (c) 2021, Google LLC. */ -#define USE_GUEST_ASSERT_PRINTF 1 - #define _GNU_SOURCE #include diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index fdd5b05e1b0e..f5b6cb3a0019 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c index 94555a7d3c7e..31f66ba97228 100644 --- a/tools/testing/selftests/kvm/aarch64/hypercalls.c +++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c @@ -8,8 +8,6 @@ * hypercalls are properly masked or unmasked to the guest when disabled or * enabled from the KVM userspace, respectively. */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index 0b0dd90feae5..47bb914ab2fa 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -7,8 +7,6 @@ * hugetlbfs with a hole). It checks that the expected handling method is * called (e.g., uffd faults with the right address and write/read flag). 
*/ -#define USE_GUEST_ASSERT_PRINTF 1 - #define _GNU_SOURCE #include #include diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 67da33aa6d17..2e64b4856e38 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -7,8 +7,6 @@ * host to inject a specific intid via a GUEST_SYNC call, and then checks that * it received it. */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c index 267e01d057fb..41230b746190 100644 --- a/tools/testing/selftests/kvm/guest_print_test.c +++ b/tools/testing/selftests/kvm/guest_print_test.c @@ -4,8 +4,6 @@ * * Copyright 2022, Google, Inc. and/or its affiliates. */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index 9e5948dab030..99bbb56cd1a5 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -66,7 +66,6 @@ enum guest_assert_builtin_args { GUEST_ASSERT_BUILTIN_NARGS }; -#ifdef USE_GUEST_ASSERT_PRINTF #define ____GUEST_ASSERT(_condition, _exp, _fmt, _args...) \ do { \ if (!(_condition)) \ @@ -108,74 +107,4 @@ do { \ #define GUEST_ASSERT_1(_condition, arg1) \ __GUEST_ASSERT(_condition, "arg1 = 0x%lx", arg1) -#else - -#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) \ -do { \ - if (!(_condition)) \ - ucall(UCALL_ABORT, GUEST_ASSERT_BUILTIN_NARGS + _nargs, \ - "Failed guest assert: " _condstr, \ - __FILE__, __LINE__, ##_args); \ -} while (0) - -#define GUEST_ASSERT(_condition) \ - __GUEST_ASSERT(_condition, #_condition, 0, 0) - -#define GUEST_ASSERT_1(_condition, arg1) \ - __GUEST_ASSERT(_condition, #_condition, 1, (arg1)) - -#define GUEST_ASSERT_2(_condition, arg1, arg2) \ - __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2)) - -#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \ - __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3)) - -#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \ - __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4)) - -#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b) - -#define __REPORT_GUEST_ASSERT(_ucall, fmt, _args...) \ - TEST_FAIL("%s at %s:%ld\n" fmt, \ - (const char *)(_ucall).args[GUEST_ERROR_STRING], \ - (const char *)(_ucall).args[GUEST_FILE], \ - (_ucall).args[GUEST_LINE], \ - ##_args) - -#define GUEST_ASSERT_ARG(ucall, i) ((ucall).args[GUEST_ASSERT_BUILTIN_NARGS + i]) - -#define REPORT_GUEST_ASSERT(ucall) \ - __REPORT_GUEST_ASSERT((ucall), "") - -#define REPORT_GUEST_ASSERT_1(ucall, fmt) \ - __REPORT_GUEST_ASSERT((ucall), \ - fmt, \ - GUEST_ASSERT_ARG((ucall), 0)) - -#define REPORT_GUEST_ASSERT_2(ucall, fmt) \ - __REPORT_GUEST_ASSERT((ucall), \ - fmt, \ - GUEST_ASSERT_ARG((ucall), 0), \ - GUEST_ASSERT_ARG((ucall), 1)) - -#define REPORT_GUEST_ASSERT_3(ucall, fmt) \ - __REPORT_GUEST_ASSERT((ucall), \ - fmt, \ - GUEST_ASSERT_ARG((ucall), 0), \ - GUEST_ASSERT_ARG((ucall), 1), \ - GUEST_ASSERT_ARG((ucall), 2)) - -#define REPORT_GUEST_ASSERT_4(ucall, fmt) \ - __REPORT_GUEST_ASSERT((ucall), \ - fmt, \ - GUEST_ASSERT_ARG((ucall), 0), \ - GUEST_ASSERT_ARG((ucall), 1), \ - GUEST_ASSERT_ARG((ucall), 2), \ - GUEST_ASSERT_ARG((ucall), 3)) - -#define REPORT_GUEST_ASSERT_N(ucall, fmt, args...) 
\ - __REPORT_GUEST_ASSERT((ucall), fmt, ##args) - -#endif /* USE_GUEST_ASSERT_PRINTF */ - #endif /* SELFTEST_KVM_UCALL_COMMON_H */ diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index 55f1bc70e571..20eb2e730800 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -6,8 +6,6 @@ * * Basic guest setup / host vCPU thread code lifted from set_memory_region_test. */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index a49173907cec..bb3ca9a5d731 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -4,8 +4,6 @@ * * Copyright (C) 2019, Red Hat, Inc. */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c index c12c6824d963..c73f948c9b63 100644 --- a/tools/testing/selftests/kvm/s390x/tprot.c +++ b/tools/testing/selftests/kvm/s390x/tprot.c @@ -4,8 +4,6 @@ * * Copyright IBM Corp. 2021 */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include "test_util.h" #include "kvm_util.h" diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c index dd8f4bac9df8..b32960189f5f 100644 --- a/tools/testing/selftests/kvm/set_memory_region_test.c +++ b/tools/testing/selftests/kvm/set_memory_region_test.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#define USE_GUEST_ASSERT_PRINTF 1 - #define _GNU_SOURCE /* for program_invocation_short_name */ #include #include diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index 8649c8545882..171adfb2a6cb 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -4,8 +4,6 @@ * * Copyright (C) 2020, Red Hat, Inc. 
*/ -#define USE_GUEST_ASSERT_PRINTF 1 - #define _GNU_SOURCE #include #include diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c index eb1b65ffc0d5..3b34d8156d1c 100644 --- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c +++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c @@ -4,8 +4,6 @@ * * Generic tests for KVM CPUID set/get ioctls */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c index 0107d54a1a08..e036db1f32b9 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c @@ -8,8 +8,6 @@ * Copyright 2022 Google LLC * Author: Vipin Sharma */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include "kvm_util.h" #include "processor.h" #include "hyperv.h" diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 41a6beff78c4..9f28aa276c4e 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -4,8 +4,6 @@ * * Tests for Hyper-V features enablement */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c index 1c28b77ff3cd..9e2879af7c20 100644 --- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c +++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c @@ -4,8 +4,6 @@ * * Tests for KVM paravirtual feature disablement */ -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c index 960fecab3742..80aa3d8b18f8 100644 --- a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c +++ b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#define USE_GUEST_ASSERT_PRINTF 1 - #include #include #include diff --git a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c index 4a29f59a76be..3670331adf21 100644 --- a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c +++ b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-only -#define USE_GUEST_ASSERT_PRINTF 1 - #define _GNU_SOURCE /* for program_invocation_short_name */ #include "test_util.h" diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c index abb3f26d3ce0..366cf18600bc 100644 --- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c +++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c @@ -4,8 +4,6 @@ * * Copyright (C) 2020, Red Hat, Inc. */ -#define USE_GUEST_ASSERT_PRINTF 1 - #define _GNU_SOURCE /* for program_invocation_name */ #include #include diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c index c908412c5754..7ee44496cf97 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c @@ -8,8 +8,6 @@ * Copyright (C) 2021, Red Hat, Inc. 
 *
 */
-#define USE_GUEST_ASSERT_PRINTF 1
-
 #include
 #include
 #include
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
index cf9114f70e1c..12b0964f4f13 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
@@ -4,8 +4,6 @@
  *
  * Copyright (C) 2020, Red Hat, Inc.
  */
-#define USE_GUEST_ASSERT_PRINTF 1
-
 #include
 #include
 #include "kvm_util.h"
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
index 2c5d2a18d184..255c50b0dc32 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
@@ -1,6 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0
-#define USE_GUEST_ASSERT_PRINTF 1
-
 #include
 #include
 #include
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
index ba09d5a01c39..ebbcb0a3f743 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
@@ -10,8 +10,6 @@
  * and check it can be retrieved with KVM_GET_MSR, also test
  * the invalid LBR formats are rejected.
  */
-#define USE_GUEST_ASSERT_PRINTF 1
-
 #define _GNU_SOURCE /* for program_invocation_short_name */
 #include
diff --git a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
index 5e8290797720..77d04a7bdadd 100644
--- a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
@@ -4,8 +4,6 @@
  *
  * Copyright (C) 2022, Google LLC.
  */
-#define USE_GUEST_ASSERT_PRINTF 1
-
 #include
 #include
 #include
-- cgit

From 6f321017c84b33a69f6ac131f6c91d1e8a5ff585 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 17:36:42 -0700
Subject: KVM: selftests: Print out guest RIP on unhandled exception

Use the newfangled printf-based guest assert framework to spit out the
guest RIP when an unhandled exception is detected, which makes debugging
such failures *much* easier.
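
For reference, the host side of a test doesn't change; a run loop along
the lines of the sketch below (using the selftest library's helpers)
simply gets a far more useful report out of the box:

	vcpu_run(vcpu);
	/* On an unhandled exception, this now fires REPORT_GUEST_ASSERT()
	 * with the file, line, vector, and guest RIP baked in.
	 */
	assert_on_unhandled_exception(vcpu);
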
Link: https://lore.kernel.org/r/20230729003643.1053367-34-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/lib/x86_64/processor.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index d4a0b504b1e0..d8288374078e 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1074,11 +1074,6 @@ static bool kvm_fixup_exception(struct ex_regs *regs) return true; } -void kvm_exit_unexpected_vector(uint32_t value) -{ - ucall(UCALL_UNHANDLED, 1, value); -} - void route_exception(struct ex_regs *regs) { typedef void(*handler)(struct ex_regs *); @@ -1092,7 +1087,10 @@ void route_exception(struct ex_regs *regs) if (kvm_fixup_exception(regs)) return; - kvm_exit_unexpected_vector(regs->vector); + ucall_assert(UCALL_UNHANDLED, + "Unhandled exception in guest", __FILE__, __LINE__, + "Unhandled exception '0x%lx' at guest RIP '0x%lx'", + regs->vector, regs->rip); } void vm_init_descriptor_tables(struct kvm_vm *vm) @@ -1135,12 +1133,8 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) { struct ucall uc; - if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) { - uint64_t vector = uc.args[0]; - - TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)", - vector); - } + if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) + REPORT_GUEST_ASSERT(uc); } const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, -- cgit From a05c4c2bd8b59e291cc9d68b79d6b45a34bcb54e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:36:43 -0700 Subject: KVM: selftests: Use GUEST_FAIL() in ARM's arch timer helpers Use GUEST_FAIL() in ARM's arch timer helpers now that printf-based guest asserts are the default (and only) style of guest asserts, and say goodbye to the GUEST_ASSERT_1() alias. 
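
The conversion pattern, sketched on one of the helpers (illustrative
only; the actual diff follows):

	static inline uint64_t timer_get_cntct(enum arch_timer timer)
	{
		switch (timer) {
		case VIRTUAL:
			return read_sysreg(cntvct_el0);
		case PHYSICAL:
			return read_sysreg(cntpct_el0);
		default:
			/* Was GUEST_ASSERT_1(0, timer), which reported a
			 * bare "arg1 = 0x%lx".
			 */
			GUEST_FAIL("Unexpected timer type = %u", timer);
		}

		/* We should not reach here */
		return 0;
	}
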
Link: https://lore.kernel.org/r/20230729003643.1053367-35-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/aarch64/arch_timer.h | 12 ++++++------ tools/testing/selftests/kvm/include/ucall_common.h | 4 ---- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/kvm/include/aarch64/arch_timer.h b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h index cb7c03de3a21..b3e97525cb55 100644 --- a/tools/testing/selftests/kvm/include/aarch64/arch_timer.h +++ b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h @@ -41,7 +41,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer) case PHYSICAL: return read_sysreg(cntpct_el0); default: - GUEST_ASSERT_1(0, timer); + GUEST_FAIL("Unexpected timer type = %u", timer); } /* We should not reach here */ @@ -58,7 +58,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) write_sysreg(cval, cntp_cval_el0); break; default: - GUEST_ASSERT_1(0, timer); + GUEST_FAIL("Unexpected timer type = %u", timer); } isb(); @@ -72,7 +72,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer) case PHYSICAL: return read_sysreg(cntp_cval_el0); default: - GUEST_ASSERT_1(0, timer); + GUEST_FAIL("Unexpected timer type = %u", timer); } /* We should not reach here */ @@ -89,7 +89,7 @@ static inline void timer_set_tval(enum arch_timer timer, uint32_t tval) write_sysreg(tval, cntp_tval_el0); break; default: - GUEST_ASSERT_1(0, timer); + GUEST_FAIL("Unexpected timer type = %u", timer); } isb(); @@ -105,7 +105,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) write_sysreg(ctl, cntp_ctl_el0); break; default: - GUEST_ASSERT_1(0, timer); + GUEST_FAIL("Unexpected timer type = %u", timer); } isb(); @@ -119,7 +119,7 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer) case PHYSICAL: return read_sysreg(cntp_ctl_el0); default: - GUEST_ASSERT_1(0, timer); + GUEST_FAIL("Unexpected timer type = %u", timer); } /* We should not reach here */ diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index 99bbb56cd1a5..112bc1da732a 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -103,8 +103,4 @@ do { \ (const char *)(ucall).args[GUEST_FILE], \ (ucall).args[GUEST_LINE], "%s", (ucall).buffer) -/* FIXME: Drop this alias once the param-based guest asserts are gone. */ -#define GUEST_ASSERT_1(_condition, arg1) \ - __GUEST_ASSERT(_condition, "arg1 = 0x%lx", arg1) - #endif /* SELFTEST_KVM_UCALL_COMMON_H */ -- cgit From 7f717f54845c518a64fe5464d855ddeca272483a Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Tue, 18 Jul 2023 12:15:44 +0200 Subject: KVM: x86: Remove x86_emulate_ops::guest_has_long_mode Remove x86_emulate_ops::guest_has_long_mode along with its implementation, emulator_guest_has_long_mode(). It has been unused since commit 1d0da94cdafe ("KVM: x86: do not go through ctxt->ops when emulating rsm"). No functional change intended. 
Signed-off-by: Michal Luczaj
Link: https://lore.kernel.org/r/20230718101809.1249769-1-mhal@rbox.co
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/kvm_emulate.h | 1 -
 arch/x86/kvm/x86.c | 6 ------
 2 files changed, 7 deletions(-)

diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index ab65f3a47dfd..be7aeb9b8ea3 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -213,7 +213,6 @@ struct x86_emulate_ops {
 	bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx,
 			  u32 *ecx, u32 *edx, bool exact_only);
-	bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
 	bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
 	bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
 	bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a6b9bea62fb8..0fca1546e029 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8229,11 +8229,6 @@ static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
 	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
 }
-static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt)
-{
-	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM);
-}
-
 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
 {
 	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
@@ -8335,7 +8330,6 @@ static const struct x86_emulate_ops emulate_ops = {
 	.fix_hypercall = emulator_fix_hypercall,
 	.intercept = emulator_intercept,
 	.get_cpuid = emulator_get_cpuid,
-	.guest_has_long_mode = emulator_guest_has_long_mode,
 	.guest_has_movbe = emulator_guest_has_movbe,
 	.guest_has_fxsr = emulator_guest_has_fxsr,
 	.guest_has_rdpid = emulator_guest_has_rdpid,
-- cgit

From af8e2ccfa6f101f505add076c1a4d56c718e0d50 Mon Sep 17 00:00:00 2001
From: Takahiro Itazuri
Date: Wed, 12 Jul 2023 19:31:36 +0100
Subject: KVM: x86: Advertise host CPUID 0x80000005 in KVM_GET_SUPPORTED_CPUID

Advertise CPUID 0x80000005 (L1 cache and TLB info) to userspace so that
VMMs that reflect KVM_GET_SUPPORTED_CPUID into KVM_SET_CPUID2 will
enumerate sane cache/TLB information to the guest.

CPUID 0x80000006 (L2 cache and TLB and L3 cache info) has been returned
since commit 43d05de2bee7 ("KVM: pass through CPUID(0x80000006)").
Enumerating both 0x80000005 and 0x80000006 with KVM_GET_SUPPORTED_CPUID
is better than reporting one or the other, and 0x80000005 could be
helpful for the VMM to pass to KVM_SET_CPUID{,2} for the same reason as
0x80000006.

Signed-off-by: Takahiro Itazuri
Link: https://lore.kernel.org/all/ZK7NmfKI9xur%2FMop@google.com
Link: https://lore.kernel.org/r/20230712183136.85561-1-itazur@amazon.com
[sean: add link, massage changelog]
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/cpuid.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7f4d13383cf2..ad6566636c22 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1151,6 +1151,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
 		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
 		break;
+	case 0x80000005:
+		/* Pass host L1 cache and TLB info. */
+		break;
 	case 0x80000006:
 		/* Drop reserved bits, pass host L2 cache and TLB info.
*/ entry->edx &= ~GENMASK(17, 16); -- cgit From a2fd5d02bad6d63daaaf4a8bb19c2400387aca61 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 6 Jun 2023 17:43:09 -0700 Subject: KVM: x86: Snapshot host's MSR_IA32_ARCH_CAPABILITIES Snapshot the host's MSR_IA32_ARCH_CAPABILITIES, if it's supported, instead of reading the MSR every time KVM wants to query the host state, e.g. when initializing the default value during vCPU creation. The paths that query ARCH_CAPABILITIES aren't particularly performance sensitive, but creating vCPUs is a frequent enough operation that burning 8 bytes is a good trade-off. Alternatively, KVM could add a field in kvm_caps and thus skip the on-demand calculations entirely, but a pure snapshot isn't possible due to the way KVM handles the l1tf_vmx_mitigation module param. And unlike the other "supported" fields in kvm_caps, KVM doesn't enforce the "supported" value, i.e. KVM treats ARCH_CAPABILITIES like a CPUID leaf and lets userspace advertise whatever it wants. Those problems are solvable, but it's not clear there is real benefit versus snapshotting the host value, and grabbing the host value will allow additional cleanup of KVM's FB_CLEAR_CTRL code. Link: https://lore.kernel.org/all/20230524061634.54141-2-chao.gao@intel.com Cc: Chao Gao Cc: Xiaoyao Li Reviewed-by: Chao Gao Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230607004311.1420507-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 22 ++++++---------------- arch/x86/kvm/x86.c | 13 +++++++------ arch/x86/kvm/x86.h | 1 + 3 files changed, 14 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 254f2296e549..f0ec9acae86c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -255,14 +255,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) return 0; } - if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { - u64 msr; - - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); - if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { - l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; - return 0; - } + if (host_arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; + return 0; } /* If set to auto use the default l1tf mitigation method */ @@ -373,15 +368,10 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) static void vmx_setup_fb_clear_ctrl(void) { - u64 msr; - - if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && + if ((host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) && !boot_cpu_has_bug(X86_BUG_MDS) && - !boot_cpu_has_bug(X86_BUG_TAA)) { - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); - if (msr & ARCH_CAP_FB_CLEAR_CTRL) - vmx_fb_clear_ctrl_available = true; - } + !boot_cpu_has_bug(X86_BUG_TAA)) + vmx_fb_clear_ctrl_available = true; } static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0fca1546e029..a1b13d2d1d71 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -237,6 +237,9 @@ EXPORT_SYMBOL_GPL(enable_apicv); u64 __read_mostly host_xss; EXPORT_SYMBOL_GPL(host_xss); +u64 __read_mostly host_arch_capabilities; +EXPORT_SYMBOL_GPL(host_arch_capabilities); + const struct _kvm_stats_desc kvm_vm_stats_desc[] = { KVM_GENERIC_VM_STATS(), STATS_DESC_COUNTER(VM, mmu_shadow_zapped), @@ -1611,12 +1614,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr) static u64 kvm_get_arch_capabilities(void) { - u64 data = 0; - - if 
(boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
-		data &= KVM_SUPPORTED_ARCH_CAP;
-	}
+	u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
 	/*
 	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
@@ -9490,6 +9488,9 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	kvm_init_pmu_capability(ops->pmu_ops);
+	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, host_arch_capabilities);
+
 	r = ops->hardware_setup();
 	if (r != 0)
 		goto out_mmu_exit;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 82e3dafc5453..1e7be1f6ab29 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -323,6 +323,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 extern u64 host_xcr0;
 extern u64 host_xss;
+extern u64 host_arch_capabilities;
 extern struct kvm_caps kvm_caps;
-- cgit

From 550ba57faa04042956079b8d09ec0f83bef8817f Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Tue, 6 Jun 2023 17:43:10 -0700
Subject: KVM: VMX: Drop unnecessary vmx_fb_clear_ctrl_available "cache"

Now that KVM snapshots the host's MSR_IA32_ARCH_CAPABILITIES, drop the
similar snapshot/cache of whether or not KVM is allowed to manipulate
MSR_IA32_MCU_OPT_CTRL.FB_CLEAR_DIS.  The motivation for the cache was
presumably to avoid the RDMSR, e.g. boot_cpu_has_bug() is quite cheap,
and modifying the vCPU's MSR_IA32_ARCH_CAPABILITIES is an infrequent
operation and a relatively slow path.

Cc: Pawan Gupta
Reviewed-by: Pawan Gupta
Reviewed-by: Xiaoyao Li
Link: https://lore.kernel.org/r/20230607004311.1420507-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmx.c | 17 +++--------------
 1 file changed, 3 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f0ec9acae86c..e6d1ce2d230c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -237,9 +237,6 @@ static const struct {
 #define L1D_CACHE_ORDER 4
 static void *vmx_l1d_flush_pages;
-/* Control for disabling CPU Fill buffer clear */
-static bool __read_mostly vmx_fb_clear_ctrl_available;
-
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
 	struct page *page;
@@ -366,14 +363,6 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
 	return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
 }
-static void vmx_setup_fb_clear_ctrl(void)
-{
-	if ((host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
-	    !boot_cpu_has_bug(X86_BUG_MDS) &&
-	    !boot_cpu_has_bug(X86_BUG_TAA))
-		vmx_fb_clear_ctrl_available = true;
-}
-
 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
 {
 	u64 msr;
@@ -399,7 +388,9 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 {
-	vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
+	vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
+				!boot_cpu_has_bug(X86_BUG_MDS) &&
+				!boot_cpu_has_bug(X86_BUG_TAA);
 	/*
 	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
@@ -8626,8 +8617,6 @@ static int __init vmx_init(void)
 	if (r)
 		goto err_l1d_flush;
-	vmx_setup_fb_clear_ctrl();
-
 	for_each_possible_cpu(cpu) {
 		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
-- cgit

From 775bc098657b4996c8537d0eaf9c20138428b699 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 21 Jul 2023 16:38:58 -0700
Subject: KVM: VMX: Drop manual TLB flush when migrating
vmcs.APIC_ACCESS_ADDR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the superfluous flush of the current TLB in VMX's handling of migration of the APIC-access page, as a full TLB flush on all vCPUs will have already been performed in response to kvm_unmap_gfn_range() *if* there were SPTEs pointing at the APIC-access page. And if there were no valid SPTEs, then there can't possibly be TLB entries to flush. The extra flush was added by commit fb6c81984313 ("kvm: vmx: Flush TLB when the APIC-access address changes"), with the justification of "because the SDM says so". The SDM said, and still says: As detailed in Section xx.x.x, an access to the APIC-access page might not cause an APIC-access VM exit if software does not properly invalidate information that may be cached from the EPT paging structures. If EPT was in use on a logical processor at one time with EPTP X, it is recommended that software use the INVEPT instruction with the “single-context” INVEPT type and with EPTP X in the INVEPT descriptor before a VM entry on the same logical processor that enables EPT with EPTP X and either (a) the "virtualize APIC accesses" VM- execution control was changed from 0 to 1; or (b) the value of the APIC-access address was changed. But the "recommendation" for (b) is predicated on there actually being a valid EPT translation *and* possible TLB entries for the GPA (or guest VA when using shadow paging). It's possible that a different vCPU has established a mapping for the new page, but the current vCPU can't have entered the guest, i.e. can't have created a TLB entry, between flushing the old mappings and changing its vmcs.APIC_ACCESS_ADDR. kvm_unmap_gfn_range() waits for all vCPUs to ack KVM_REQ_APIC_PAGE_RELOAD, and then flushes remote TLBs (which may or may not also pend a request). Thus the vCPU is guaranteed to update vmcs.APIC_ACCESS_ADDR before re-entering the guest and before it can possibly create new TLB entries. In other words, KVM does flush in this case, it just does so earlier on while handling the page migration. Note, VMX also flushes if the vCPU is migrated to a new pCPU, i.e. if the vCPU is migrated to a pCPU that entered the guest for a different vCPU. Suggested-by: Yu Zhang Cc: Jim Mattson Reviewed-by: Yu Zhang Reviewed-by: Paolo Bonzini Link: https://lore.kernel.org/r/20230721233858.2343941-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 0ecf4be2c6af..3f868826db7d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6767,8 +6767,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn)); read_unlock(&vcpu->kvm->mmu_lock); - vmx_flush_tlb_current(vcpu); - + /* + * No need for a manual TLB flush at this point, KVM has already done a + * flush if there were SPTEs pointing at the previous page. 
+	 */
 out:
 	/*
 	 * Do not pin apic access page in memory, the MMU notifier
-- cgit

From d518f8cc10af7a1265595fccf553a3a5f7121e4f Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Wed, 7 Jun 2023 13:35:17 -0700
Subject: KVM: SVM: Fix dead KVM_BUG() code in LBR MSR virtualization

Refactor KVM's handling of LBR MSRs on SVM to avoid a second layer of
case statements, and thus eliminate a dead KVM_BUG() call, which (a) will
never be hit in the current code base and (b) if a future commit breaks
things, will never fire as KVM passes "false" instead of "true" or '1'
for the KVM_BUG() condition.

Reported-by: Michal Luczaj
Cc: Yuan Yao
Link: https://lore.kernel.org/r/20230607203519.1570167-2-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/svm/svm.c | 45 ++++++++++++++++-----------------------------
 1 file changed, 16 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1bc0936bbd51..623916221ff7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -980,43 +980,22 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
 	svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
 }
-static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index)
+static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
 {
 	/*
-	 * If the LBR virtualization is disabled, the LBR msrs are always
-	 * kept in the vmcb01 to avoid copying them on nested guest entries.
-	 *
-	 * If nested, and the LBR virtualization is enabled/disabled, the msrs
-	 * are moved between the vmcb01 and vmcb02 as needed.
+	 * If LBR virtualization is disabled, the LBR MSRs are always kept in
+	 * vmcb01.  If LBR virtualization is enabled and L1 is running VMs of
+	 * its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
 	 */
-	struct vmcb *vmcb =
-		(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ?
-			svm->vmcb : svm->vmcb01.ptr;
-
-	switch (index) {
-	case MSR_IA32_DEBUGCTLMSR:
-		return vmcb->save.dbgctl;
-	case MSR_IA32_LASTBRANCHFROMIP:
-		return vmcb->save.br_from;
-	case MSR_IA32_LASTBRANCHTOIP:
-		return vmcb->save.br_to;
-	case MSR_IA32_LASTINTFROMIP:
-		return vmcb->save.last_excp_from;
-	case MSR_IA32_LASTINTTOIP:
-		return vmcb->save.last_excp_to;
-	default:
-		KVM_BUG(false, svm->vcpu.kvm,
-			"%s: Unknown MSR 0x%x", __func__, index);
-		return 0;
-	}
+	return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ?
svm->vmcb : + svm->vmcb01.ptr; } void svm_update_lbrv(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) & - DEBUGCTLMSR_LBR; + bool enable_lbrv = svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR; bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); @@ -2835,11 +2814,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = svm->tsc_aux; break; case MSR_IA32_DEBUGCTLMSR: + msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl; + break; case MSR_IA32_LASTBRANCHFROMIP: + msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from; + break; case MSR_IA32_LASTBRANCHTOIP: + msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to; + break; case MSR_IA32_LASTINTFROMIP: + msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from; + break; case MSR_IA32_LASTINTTOIP: - msr_info->data = svm_get_lbr_msr(svm, msr_info->index); + msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to; break; case MSR_VM_HSAVE_PA: msr_info->data = svm->nested.hsave_msr; -- cgit From 41dfb5f13ed9101b217e7085e5b5fb07d705e27f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 7 Jun 2023 13:35:18 -0700 Subject: KVM: SVM: Clean up handling of LBR virtualization enabled Clean up the enable_lbrv computation in svm_update_lbrv() to consolidate the logic for computing enable_lbrv into a single statement, and to remove the coding style violations (lack of curly braces on nested if). No functional change intended. Link: https://lore.kernel.org/r/20230607203519.1570167-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 623916221ff7..6fa918b588d2 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -994,15 +994,10 @@ static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm) void svm_update_lbrv(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - - bool enable_lbrv = svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR; - - bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext & - LBR_CTL_ENABLE_MASK); - - if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled)) - if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)) - enable_lbrv = true; + bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK; + bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) || + (is_guest_mode(vcpu) && svm->lbrv_enabled && + (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)); if (enable_lbrv == current_enable_lbrv) return; -- cgit From a85cd52d720588b0060a0ebac5c7f497ad6bcd86 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 7 Jun 2023 13:35:19 -0700 Subject: KVM: SVM: Use svm_get_lbr_vmcb() helper to handle writes to DEBUGCTL Use the recently introduced svm_get_lbr_vmcb() instead an open coded equivalent to retrieve the target VMCB when emulating writes to MSR_IA32_DEBUGCTLMSR. No functional change intended. 
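
For context, the helper (added earlier in this series) boils down to
picking whichever VMCB currently owns the LBR state:

	static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
	{
		/* LBR MSRs live in vmcb01 unless LBR virtualization is
		 * enabled, in which case they may have been moved to the
		 * currently active VMCB, i.e. vmcb02.  (This comment is
		 * illustrative, not from the source.)
		 */
		return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ?
		       svm->vmcb : svm->vmcb01.ptr;
	}

so the DEBUGCTL write path below ends up mirroring the read path in
svm_get_msr().
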
Link: https://lore.kernel.org/r/20230607203519.1570167-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 6fa918b588d2..d76c8a0764f1 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3052,13 +3052,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) if (data & DEBUGCTL_RESERVED_BITS) return 1; - if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) - svm->vmcb->save.dbgctl = data; - else - svm->vmcb01.ptr->save.dbgctl = data; - + svm_get_lbr_vmcb(svm)->save.dbgctl = data; svm_update_lbrv(vcpu); - break; case MSR_VM_HSAVE_PA: /* -- cgit From 0033fa35491680bce65ff78fb41445f472aa5baa Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 6 Jun 2023 18:02:03 -0700 Subject: KVM: x86/pmu: Use enums instead of hardcoded magic for arch event indices Add "enum intel_pmu_architectural_events" to replace the magic numbers for the (pseudo-)architectural events, and to give a meaningful name to each event so that new readers don't need psychic powers to understand what the code is doing. Cc: Aaron Lewis Cc: Like Xu Reviewed-by: Like Xu Link: https://lore.kernel.org/r/20230607010206.1425277-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 55 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 80c769c58a87..1109cbdedcae 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -22,23 +22,51 @@ #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0) +enum intel_pmu_architectural_events { + /* + * The order of the architectural events matters as support for each + * event is enumerated via CPUID using the index of the event. + */ + INTEL_ARCH_CPU_CYCLES, + INTEL_ARCH_INSTRUCTIONS_RETIRED, + INTEL_ARCH_REFERENCE_CYCLES, + INTEL_ARCH_LLC_REFERENCES, + INTEL_ARCH_LLC_MISSES, + INTEL_ARCH_BRANCHES_RETIRED, + INTEL_ARCH_BRANCHES_MISPREDICTED, + + NR_REAL_INTEL_ARCH_EVENTS, + + /* + * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a. + * TSC reference cycles. The architectural reference cycles event may + * or may not actually use the TSC as the reference, e.g. might use the + * core crystal clock or the bus clock (yeah, "architectural"). 
+ */ + PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS, + NR_INTEL_ARCH_EVENTS, +}; + static struct { u8 eventsel; u8 unit_mask; } const intel_arch_events[] = { - [0] = { 0x3c, 0x00 }, - [1] = { 0xc0, 0x00 }, - [2] = { 0x3c, 0x01 }, - [3] = { 0x2e, 0x4f }, - [4] = { 0x2e, 0x41 }, - [5] = { 0xc4, 0x00 }, - [6] = { 0xc5, 0x00 }, - /* The above index must match CPUID 0x0A.EBX bit vector */ - [7] = { 0x00, 0x03 }, + [INTEL_ARCH_CPU_CYCLES] = { 0x3c, 0x00 }, + [INTEL_ARCH_INSTRUCTIONS_RETIRED] = { 0xc0, 0x00 }, + [INTEL_ARCH_REFERENCE_CYCLES] = { 0x3c, 0x01 }, + [INTEL_ARCH_LLC_REFERENCES] = { 0x2e, 0x4f }, + [INTEL_ARCH_LLC_MISSES] = { 0x2e, 0x41 }, + [INTEL_ARCH_BRANCHES_RETIRED] = { 0xc4, 0x00 }, + [INTEL_ARCH_BRANCHES_MISPREDICTED] = { 0xc5, 0x00 }, + [PSEUDO_ARCH_REFERENCE_CYCLES] = { 0x00, 0x03 }, }; /* mapping between fixed pmc index and intel_arch_events array */ -static int fixed_pmc_events[] = {1, 0, 7}; +static int fixed_pmc_events[] = { + [0] = INTEL_ARCH_INSTRUCTIONS_RETIRED, + [1] = INTEL_ARCH_CPU_CYCLES, + [2] = PSEUDO_ARCH_REFERENCE_CYCLES, +}; static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) { @@ -80,13 +108,16 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc) u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; - for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) { + BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS); + + for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) { if (intel_arch_events[i].eventsel != event_select || intel_arch_events[i].unit_mask != unit_mask) continue; /* disable event that reported as not present by cpuid */ - if ((i < 7) && !(pmu->available_event_types & (1 << i))) + if ((i < PSEUDO_ARCH_REFERENCE_CYCLES) && + !(pmu->available_event_types & (1 << i))) return false; break; -- cgit From bc9658999b3e44cbad964fe9fbab5a25bd4f5ed3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 6 Jun 2023 18:02:04 -0700 Subject: KVM: x86/pmu: Simplify intel_hw_event_available() Walk only the "real", i.e. non-pseudo, architectural events when checking if a hardware event is available, i.e. isn't disabled by guest CPUID. Skipping pseudo-arch events in the loop body is unnecessarily convoluted, especially now that KVM has enums that delineate between real and pseudo events. Link: https://lore.kernel.org/r/20230607010206.1425277-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 1109cbdedcae..db23697f3141 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -110,17 +110,16 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc) BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS); - for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) { + /* + * Disallow events reported as unavailable in guest CPUID. Note, this + * doesn't apply to pseudo-architectural events. 
+ */ + for (i = 0; i < NR_REAL_INTEL_ARCH_EVENTS; i++) { if (intel_arch_events[i].eventsel != event_select || intel_arch_events[i].unit_mask != unit_mask) continue; - /* disable event that reported as not present by cpuid */ - if ((i < PSEUDO_ARCH_REFERENCE_CYCLES) && - !(pmu->available_event_types & (1 << i))) - return false; - - break; + return pmu->available_event_types & BIT(i); } return true; -- cgit From 6d88d0ee5de142921598e5c0041792bbd860913d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 6 Jun 2023 18:02:05 -0700 Subject: KVM: x86/pmu: Require nr fixed_pmc_events to match nr max fixed counters Assert that the number of known fixed_pmc_events matches the max number of fixed counters supported by KVM, and clean up related code. Opportunistically extend setup_fixed_pmc_eventsel()'s use of array_index_nospec() to cover fixed_counters, as nr_arch_fixed_counters is set based on userspace input (but capped using KVM-controlled values). Link: https://lore.kernel.org/r/20230607010206.1425277-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index db23697f3141..f2efa0bf7ae8 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -468,16 +468,17 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu) { - size_t size = ARRAY_SIZE(fixed_pmc_events); - struct kvm_pmc *pmc; - u32 event; int i; + BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED); + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { - pmc = &pmu->fixed_counters[i]; - event = fixed_pmc_events[array_index_nospec(i, size)]; + int index = array_index_nospec(i, KVM_PMC_MAX_FIXED); + struct kvm_pmc *pmc = &pmu->fixed_counters[index]; + u32 event = fixed_pmc_events[index]; + pmc->eventsel = (intel_arch_events[event].unit_mask << 8) | - intel_arch_events[event].eventsel; + intel_arch_events[event].eventsel; } } @@ -538,10 +539,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) if (pmu->version == 1) { pmu->nr_arch_fixed_counters = 0; } else { - pmu->nr_arch_fixed_counters = - min3(ARRAY_SIZE(fixed_pmc_events), - (size_t) edx.split.num_counters_fixed, - (size_t)kvm_pmu_cap.num_counters_fixed); + pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed, + kvm_pmu_cap.num_counters_fixed); edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed, kvm_pmu_cap.bit_width_fixed); pmu->counter_bitmask[KVM_PMC_FIXED] = -- cgit From 6de2ccc169683bf81feba163834dae7cdebdd826 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 6 Jun 2023 18:02:06 -0700 Subject: KVM: x86/pmu: Move .hw_event_available() check out of PMC filter helper Move the call to kvm_x86_pmu.hw_event_available(), which has nothing to with the userspace PMU filter, out of check_pmu_event_filter() and into its sole caller pmc_event_is_allowed(). pmc_event_is_allowed() didn't exist when commit 7aadaa988c5e ("KVM: x86/pmu: Drop amd_event_mapping[] in the KVM context"), so presumably the motivation for invoking .hw_event_available() from check_pmu_event_filter() was to avoid having to add multiple call sites. 
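
The resulting check, annotated (a restatement of the hunk below; the
comments are illustrative, not from the source):

	static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
	{
		return pmc_is_globally_enabled(pmc) &&	/* global enable bit */
		       pmc_speculative_in_use(pmc) &&	/* eventsel enabled */
		       static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
		       check_pmu_event_filter(pmc);	/* userspace filter only */
	}
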
Link: https://lore.kernel.org/r/20230607010206.1425277-5-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/pmu.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bf653df86112..edb89b51b383 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -382,9 +382,6 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 	struct kvm_x86_pmu_event_filter *filter;
 	struct kvm *kvm = pmc->vcpu->kvm;
-	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
-		return false;
-
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 	if (!filter)
 		return true;
@@ -398,6 +395,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
 {
 	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
+	       static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
 	       check_pmu_event_filter(pmc);
 }
-- cgit

From 41e90a69a49be2e3467e811ae8fdc17456af1b02 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 2 Jun 2023 16:32:49 -0700
Subject: KVM: x86: Retry APIC optimized map recalc if vCPU is added/enabled

Retry the optimized APIC map recalculation if an APIC-enabled vCPU shows
up between allocating the map and filling in the map data.  Conditionally
reschedule before retrying even though the number of vCPUs that can be
created is bounded by KVM.  Retrying a few thousand times isn't so slow
as to be hugely problematic, but it's not blazing fast either.

Reset xapic_id_mismatch on each retry as a vCPU could change its xAPIC
ID between loops, but do NOT reset max_id.  The map size also factors in
whether or not a vCPU's local APIC is hardware-enabled, i.e. userspace
and/or the guest can theoretically keep KVM retrying indefinitely.  The
only downside is that KVM will allocate more memory than is strictly
necessary if the vCPU with the highest x2APIC ID disabled its APIC while
the recalculation was in-progress.

Refresh kvm->arch.apic_map_dirty to opportunistically change it from
DIRTY => UPDATE_IN_PROGRESS to avoid an unnecessary recalc from a
different task, i.e. if another task is waiting to attempt an update
(which is likely since a retry happens if and only if an update is
required).

Link: https://lore.kernel.org/r/20230602233250.1014316-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/lapic.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 113ca9661ab2..673880bc0762 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -376,7 +376,8 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
 	u32 max_id = 255; /* enough space for any xAPIC ID */
-	bool xapic_id_mismatch = false;
+	bool xapic_id_mismatch;
+	int r;
 	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
 	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
@@ -386,9 +387,14 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
 		 "Dirty APIC map without an in-kernel local APIC");
 	mutex_lock(&kvm->arch.apic_map_lock);
+
+retry:
 	/*
-	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
-	 * (if clean) or the APIC registers (if dirty).
+	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
+	 * or the APIC registers (if dirty).  Note, on retry the map may have
+	 * not yet been marked dirty by whatever task changed a vCPU's x2APIC
+	 * ID, i.e. the map may still show up as in-progress.
In that case + * this task still needs to retry and complete its calculation. */ if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty, DIRTY, UPDATE_IN_PROGRESS) == CLEAN) { @@ -397,6 +403,15 @@ void kvm_recalculate_apic_map(struct kvm *kvm) return; } + /* + * Reset the mismatch flag between attempts so that KVM does the right + * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e. + * keep max_id strictly increasing. Disallowing max_id from shrinking + * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU + * with the highest x2APIC ID is toggling its APIC on and off. + */ + xapic_id_mismatch = false; + kvm_for_each_vcpu(i, vcpu, kvm) if (kvm_apic_present(vcpu)) max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic)); @@ -415,9 +430,15 @@ void kvm_recalculate_apic_map(struct kvm *kvm) if (!kvm_apic_present(vcpu)) continue; - if (kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch)) { + r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch); + if (r) { kvfree(new); new = NULL; + if (r == -E2BIG) { + cond_resched(); + goto retry; + } + goto out; } -- cgit From b23c83ad2c638420ec0608a9de354507c41bec29 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:41 -0700 Subject: x86/reboot: VMCLEAR active VMCSes before emergency reboot VMCLEAR active VMCSes before any emergency reboot, not just if the kernel may kexec into a new kernel after a crash. Per Intel's SDM, the VMX architecture doesn't require the CPU to flush the VMCS cache on INIT. If an emergency reboot doesn't RESET CPUs, cached VMCSes could theoretically be kept and only be written back to memory after the new kernel is booted, i.e. could effectively corrupt memory after reboot. Opportunistically remove the setting of the global pointer to NULL to make checkpatch happy. Cc: Andrew Cooper Link: https://lore.kernel.org/r/20230721201859.2307736-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kexec.h | 2 -- arch/x86/include/asm/reboot.h | 2 ++ arch/x86/kernel/crash.c | 31 ------------------------------- arch/x86/kernel/reboot.c | 22 ++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 10 +++------- 5 files changed, 27 insertions(+), 40 deletions(-) diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 5b77bbc28f96..819046974b99 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -205,8 +205,6 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image); #endif #endif -typedef void crash_vmclear_fn(void); -extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss; extern void kdump_nmi_shootdown_cpus(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 9177b4354c3f..dc201724a643 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type); #define MRR_BIOS 0 #define MRR_APM 1 +typedef void crash_vmclear_fn(void); +extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss; void cpu_emergency_disable_virtualization(void); typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index cdd92ab43cda..54cd959cb316 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -48,38 +48,12 @@ struct crash_memmap_data { unsigned int type; }; -/* - * This is used to VMCLEAR all VMCSs loaded on the - * processor. 
And when loading kvm_intel module, the - * callback function pointer will be assigned. - * - * protected by rcu. - */ -crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL; -EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss); - -static inline void cpu_crash_vmclear_loaded_vmcss(void) -{ - crash_vmclear_fn *do_vmclear_operation = NULL; - - rcu_read_lock(); - do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss); - if (do_vmclear_operation) - do_vmclear_operation(); - rcu_read_unlock(); -} - #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) static void kdump_nmi_callback(int cpu, struct pt_regs *regs) { crash_save_cpu(regs, cpu); - /* - * VMCLEAR VMCSs loaded on all cpus if needed. - */ - cpu_crash_vmclear_loaded_vmcss(); - /* * Disable Intel PT to stop its logging */ @@ -133,11 +107,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs) crash_smp_send_stop(); - /* - * VMCLEAR VMCSs loaded on this cpu if needed. - */ - cpu_crash_vmclear_loaded_vmcss(); - cpu_emergency_disable_virtualization(); /* diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 3adbe97015c1..3fa4c6717a1d 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -787,6 +787,26 @@ void machine_crash_shutdown(struct pt_regs *regs) } #endif +/* + * This is used to VMCLEAR all VMCSs loaded on the + * processor. And when loading kvm_intel module, the + * callback function pointer will be assigned. + * + * protected by rcu. + */ +crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss; +EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss); + +static inline void cpu_crash_vmclear_loaded_vmcss(void) +{ + crash_vmclear_fn *do_vmclear_operation = NULL; + + rcu_read_lock(); + do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss); + if (do_vmclear_operation) + do_vmclear_operation(); + rcu_read_unlock(); +} /* This is the CPU performing the emergency shutdown work. 
 */
 int crashing_cpu = -1;

@@ -798,6 +818,8 @@ int crashing_cpu = -1;
  */
 void cpu_emergency_disable_virtualization(void)
 {
+	cpu_crash_vmclear_loaded_vmcss();
+
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();
 }
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e6d1ce2d230c..75351477f090 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -41,7 +41,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -725,7 +725,6 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
	return ret;
 }

-#ifdef CONFIG_KEXEC_CORE
 static void crash_vmclear_local_loaded_vmcss(void)
 {
	int cpu = raw_smp_processor_id();
@@ -735,7 +734,6 @@ static void crash_vmclear_local_loaded_vmcss(void)
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
 }
-#endif /* CONFIG_KEXEC_CORE */

 static void __loaded_vmcs_clear(void *arg)
 {
@@ -8573,10 +8571,9 @@ static void __vmx_exit(void)
 {
	allow_smaller_maxphyaddr = false;

-#ifdef CONFIG_KEXEC_CORE
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
-#endif
+
	vmx_cleanup_l1d_flush();
 }

@@ -8623,10 +8620,9 @@ static int __init vmx_init(void)
		pi_init_cpu(cpu);
	}

-#ifdef CONFIG_KEXEC_CORE
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
-#endif
+
	vmx_check_vmcs12_offsets();

	/*
-- cgit

From 5e408396c60cd0f0b53a43713016b6d6af8d69e0 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 21 Jul 2023 13:18:42 -0700
Subject: x86/reboot: Harden virtualization hooks for emergency reboot

Provide dedicated helpers to (un)register virt hooks used during an
emergency crash/reboot, and WARN if there is an attempt to overwrite
the registered callback, or an attempt to do an unpaired unregister.

Opportunistically use rcu_assign_pointer() instead of RCU_INIT_POINTER(),
mainly so that the set/unset paths are more symmetrical, but also because
any performance gains from using RCU_INIT_POINTER() are meaningless for
this code.

Reviewed-by: Kai Huang
Link: https://lore.kernel.org/r/20230721201859.2307736-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/reboot.h | 5 +++--
 arch/x86/kernel/reboot.c | 30 ++++++++++++++++++++++++------
 arch/x86/kvm/vmx/vmx.c | 6 ++----
 3 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index dc201724a643..74c6a624d166 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,8 +25,9 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_BIOS 0
 #define MRR_APM 1

-typedef void crash_vmclear_fn(void);
-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+typedef void (cpu_emergency_virt_cb)(void);
+void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
+void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
 void cpu_emergency_disable_virtualization(void);

 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 3fa4c6717a1d..62ccedeb5e2b 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -794,17 +794,35 @@ void machine_crash_shutdown(struct pt_regs *regs)
 *
 * protected by rcu.
*/ -crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss; -EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss); +static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback; + +void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) +{ + if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback))) + return; + + rcu_assign_pointer(cpu_emergency_virt_callback, callback); +} +EXPORT_SYMBOL_GPL(cpu_emergency_register_virt_callback); + +void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) +{ + if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback) != callback)) + return; + + rcu_assign_pointer(cpu_emergency_virt_callback, NULL); + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback); static inline void cpu_crash_vmclear_loaded_vmcss(void) { - crash_vmclear_fn *do_vmclear_operation = NULL; + cpu_emergency_virt_cb *callback; rcu_read_lock(); - do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss); - if (do_vmclear_operation) - do_vmclear_operation(); + callback = rcu_dereference(cpu_emergency_virt_callback); + if (callback) + callback(); rcu_read_unlock(); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 75351477f090..661ba09685b8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8571,8 +8571,7 @@ static void __vmx_exit(void) { allow_smaller_maxphyaddr = false; - RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); - synchronize_rcu(); + cpu_emergency_unregister_virt_callback(crash_vmclear_local_loaded_vmcss); vmx_cleanup_l1d_flush(); } @@ -8620,8 +8619,7 @@ static int __init vmx_init(void) pi_init_cpu(cpu); } - rcu_assign_pointer(crash_vmclear_loaded_vmcss, - crash_vmclear_local_loaded_vmcss); + cpu_emergency_register_virt_callback(crash_vmclear_local_loaded_vmcss); vmx_check_vmcs12_offsets(); -- cgit From 119b5cb4ffd0166f3e98e9ee042f5046f7744f28 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:43 -0700 Subject: x86/reboot: KVM: Handle VMXOFF in KVM's reboot callback Use KVM VMX's reboot/crash callback to do VMXOFF in an emergency instead of manually and blindly doing VMXOFF. There's no need to attempt VMXOFF if a hypervisor, i.e. KVM, isn't loaded/active, i.e. if the CPU can't possibly be post-VMXON. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/virtext.h | 10 ---------- arch/x86/kernel/reboot.c | 29 +++++++++-------------------- arch/x86/kvm/vmx/vmx.c | 8 +++++--- 3 files changed, 14 insertions(+), 33 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 3b12e6b99412..5bc29fab15da 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -70,16 +70,6 @@ static inline void __cpu_emergency_vmxoff(void) cpu_vmxoff(); } -/** Disable VMX if it is supported and enabled on the current CPU - */ -static inline void cpu_emergency_vmxoff(void) -{ - if (cpu_has_vmx()) - __cpu_emergency_vmxoff(); -} - - - /* * SVM functions: diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 62ccedeb5e2b..d2d0f2672a64 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -787,13 +787,7 @@ void machine_crash_shutdown(struct pt_regs *regs) } #endif -/* - * This is used to VMCLEAR all VMCSs loaded on the - * processor. And when loading kvm_intel module, the - * callback function pointer will be assigned. - * - * protected by rcu. 
- */ +/* RCU-protected callback to disable virtualization prior to reboot. */ static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback; void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) @@ -815,17 +809,6 @@ void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) } EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback); -static inline void cpu_crash_vmclear_loaded_vmcss(void) -{ - cpu_emergency_virt_cb *callback; - - rcu_read_lock(); - callback = rcu_dereference(cpu_emergency_virt_callback); - if (callback) - callback(); - rcu_read_unlock(); -} - /* This is the CPU performing the emergency shutdown work. */ int crashing_cpu = -1; @@ -836,9 +819,15 @@ int crashing_cpu = -1; */ void cpu_emergency_disable_virtualization(void) { - cpu_crash_vmclear_loaded_vmcss(); + cpu_emergency_virt_cb *callback; + + rcu_read_lock(); + callback = rcu_dereference(cpu_emergency_virt_callback); + if (callback) + callback(); + rcu_read_unlock(); - cpu_emergency_vmxoff(); + /* KVM_AMD doesn't yet utilize the common callback. */ cpu_emergency_svm_disable(); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 661ba09685b8..df991f15baf4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -725,7 +725,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, return ret; } -static void crash_vmclear_local_loaded_vmcss(void) +static void vmx_emergency_disable(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v; @@ -733,6 +733,8 @@ static void crash_vmclear_local_loaded_vmcss(void) list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) vmcs_clear(v->vmcs); + + __cpu_emergency_vmxoff(); } static void __loaded_vmcs_clear(void *arg) @@ -8571,7 +8573,7 @@ static void __vmx_exit(void) { allow_smaller_maxphyaddr = false; - cpu_emergency_unregister_virt_callback(crash_vmclear_local_loaded_vmcss); + cpu_emergency_unregister_virt_callback(vmx_emergency_disable); vmx_cleanup_l1d_flush(); } @@ -8619,7 +8621,7 @@ static int __init vmx_init(void) pi_init_cpu(cpu); } - cpu_emergency_register_virt_callback(crash_vmclear_local_loaded_vmcss); + cpu_emergency_register_virt_callback(vmx_emergency_disable); vmx_check_vmcs12_offsets(); -- cgit From baeb4de7ad12b700a91f2a7be8e1c0389a5c8fd4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:44 -0700 Subject: x86/reboot: KVM: Disable SVM during reboot via virt/KVM reboot callback Use the virt callback to disable SVM (and set GIF=1) during an emergency instead of blindly attempting to disable SVM. Like the VMX case, if a hypervisor, i.e. KVM, isn't loaded/active, SVM can't be in use. 
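For context, a minimal sketch of how a hypervisor module is expected to pair
the registration hooks introduced by this series (the example_* names below
are illustrative placeholders, not functions from these patches):

  /* Runs in emergency/NMI-ish context: no locks, no allocations. */
  static void example_emergency_disable(void)
  {
          /* Disable VMX or SVM on _this_ CPU only. */
  }

  static int __init example_init(void)
  {
          /* WARNs and bails if a different callback is already registered. */
          cpu_emergency_register_virt_callback(example_emergency_disable);
          return 0;
  }

  static void __exit example_exit(void)
  {
          /* Must pass the matching callback; an unpaired unregister WARNs. */
          cpu_emergency_unregister_virt_callback(example_emergency_disable);
  }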
Acked-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/virtext.h | 8 -------- arch/x86/kernel/reboot.c | 3 --- arch/x86/kvm/svm/svm.c | 19 +++++++++++++++++-- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 5bc29fab15da..aaed66249ccf 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -133,12 +133,4 @@ fault: } } -/** Makes sure SVM is disabled, if it is supported on the CPU - */ -static inline void cpu_emergency_svm_disable(void) -{ - if (cpu_has_svm(NULL)) - cpu_svm_disable(); -} - #endif /* _ASM_X86_VIRTEX_H */ diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index d2d0f2672a64..48ad2d1ff83d 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -826,9 +826,6 @@ void cpu_emergency_disable_virtualization(void) if (callback) callback(); rcu_read_unlock(); - - /* KVM_AMD doesn't yet utilize the common callback. */ - cpu_emergency_svm_disable(); } #if defined(CONFIG_SMP) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d381ad424554..1ae9c2c7eacb 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -563,6 +564,11 @@ out: preempt_enable(); } +static void svm_emergency_disable(void) +{ + cpu_svm_disable(); +} + static void svm_hardware_disable(void) { /* Make sure we clean up behind us */ @@ -5209,6 +5215,13 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { .pmu_ops = &amd_pmu_ops, }; +static void __svm_exit(void) +{ + kvm_x86_vendor_exit(); + + cpu_emergency_unregister_virt_callback(svm_emergency_disable); +} + static int __init svm_init(void) { int r; @@ -5222,6 +5235,8 @@ static int __init svm_init(void) if (r) return r; + cpu_emergency_register_virt_callback(svm_emergency_disable); + /* * Common KVM initialization _must_ come last, after this, /dev/kvm is * exposed to userspace! @@ -5234,14 +5249,14 @@ static int __init svm_init(void) return 0; err_kvm_init: - kvm_x86_vendor_exit(); + __svm_exit(); return r; } static void __exit svm_exit(void) { kvm_exit(); - kvm_x86_vendor_exit(); + __svm_exit(); } module_init(svm_init) -- cgit From ad93c1a7c0102c93e92bf0c06412a1f588e015ab Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:45 -0700 Subject: x86/reboot: Assert that IRQs are disabled when turning off virtualization Assert that IRQs are disabled when turning off virtualization in an emergency. KVM enables hardware via on_each_cpu(), i.e. could re-enable hardware if a pending IPI were delivered after disabling virtualization. Remove a misleading comment from emergency_reboot_disable_virtualization() about "just" needing to guarantee the CPU is stable (see above). 
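To illustrate the required calling context, an emergency path is expected to
look roughly like the following (sketch, not taken verbatim from the patch):

  local_irq_disable();                    /* blocks KVM's on_each_cpu() IPIs */
  cpu_emergency_disable_virtualization(); /* virt can't be re-enabled behind our back */
  nmi_shootdown_cpus_on_restart();        /* other CPUs disable virt in NMI context */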
Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-6-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kernel/reboot.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 48ad2d1ff83d..4cad7183b89e 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -532,7 +532,6 @@ static inline void nmi_shootdown_cpus_on_restart(void); static void emergency_reboot_disable_virtualization(void) { - /* Just make sure we won't change CPUs while doing this */ local_irq_disable(); /* @@ -821,6 +820,13 @@ void cpu_emergency_disable_virtualization(void) { cpu_emergency_virt_cb *callback; + /* + * IRQs must be disabled as KVM enables virtualization in hardware via + * function call IPIs, i.e. IRQs need to be disabled to guarantee + * virtualization stays disabled. + */ + lockdep_assert_irqs_disabled(); + rcu_read_lock(); callback = rcu_dereference(cpu_emergency_virt_callback); if (callback) -- cgit From edc8deb087d884bac2f7013f0c23af73042b23a7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:46 -0700 Subject: x86/reboot: Hoist "disable virt" helpers above "emergency reboot" path Move the various "disable virtualization" helpers above the emergency reboot path so that emergency_reboot_disable_virtualization() can be stubbed out in a future patch if neither KVM_INTEL nor KVM_AMD is enabled, i.e. if there is no in-tree user of CPU virtualization. No functional change intended. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-7-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kernel/reboot.c | 90 ++++++++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 4cad7183b89e..85cb2dfcb67b 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -530,6 +530,51 @@ static inline void kb_wait(void) static inline void nmi_shootdown_cpus_on_restart(void); +/* RCU-protected callback to disable virtualization prior to reboot. */ +static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback; + +void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) +{ + if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback))) + return; + + rcu_assign_pointer(cpu_emergency_virt_callback, callback); +} +EXPORT_SYMBOL_GPL(cpu_emergency_register_virt_callback); + +void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) +{ + if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback) != callback)) + return; + + rcu_assign_pointer(cpu_emergency_virt_callback, NULL); + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback); + +/* + * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during + * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if + * GIF=0, i.e. if the crash occurred between CLGI and STGI. + */ +void cpu_emergency_disable_virtualization(void) +{ + cpu_emergency_virt_cb *callback; + + /* + * IRQs must be disabled as KVM enables virtualization in hardware via + * function call IPIs, i.e. IRQs need to be disabled to guarantee + * virtualization stays disabled. 
+ */
+	lockdep_assert_irqs_disabled();
+
+	rcu_read_lock();
+	callback = rcu_dereference(cpu_emergency_virt_callback);
+	if (callback)
+		callback();
+	rcu_read_unlock();
+}
+
 static void emergency_reboot_disable_virtualization(void)
 {
	local_irq_disable();
@@ -786,54 +831,9 @@ void machine_crash_shutdown(struct pt_regs *regs)
 }
 #endif

-/* RCU-protected callback to disable virtualization prior to reboot. */
-static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
-
-void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback)
-{
-	if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback)))
-		return;
-
-	rcu_assign_pointer(cpu_emergency_virt_callback, callback);
-}
-EXPORT_SYMBOL_GPL(cpu_emergency_register_virt_callback);
-
-void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback)
-{
-	if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback) != callback))
-		return;
-
-	rcu_assign_pointer(cpu_emergency_virt_callback, NULL);
-	synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback);
-
 /* This is the CPU performing the emergency shutdown work. */
 int crashing_cpu = -1;

-/*
- * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
- * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
- * GIF=0, i.e. if the crash occurred between CLGI and STGI.
- */
-void cpu_emergency_disable_virtualization(void)
-{
-	cpu_emergency_virt_cb *callback;
-
-	/*
-	 * IRQs must be disabled as KVM enables virtualization in hardware via
-	 * function call IPIs, i.e. IRQs need to be disabled to guarantee
-	 * virtualization stays disabled.
-	 */
-	lockdep_assert_irqs_disabled();
-
-	rcu_read_lock();
-	callback = rcu_dereference(cpu_emergency_virt_callback);
-	if (callback)
-		callback();
-	rcu_read_unlock();
-}
-
 #if defined(CONFIG_SMP)

 static nmi_shootdown_cb shootdown_callback;
-- cgit

From 59765db5fc82726b32876b794667e2c6936a98ab Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 21 Jul 2023 13:18:47 -0700
Subject: x86/reboot: Disable virtualization during reboot iff callback is registered

Attempt to disable virtualization during an emergency reboot if and only
if there is a registered virt callback, i.e. iff a hypervisor (KVM) is
active. If there's no active hypervisor, then the CPU can't be operating
with VMX or SVM enabled (barring an egregious bug).

Checking for a valid callback instead of simply for SVM or VMX support
can also eliminate spurious NMIs by avoiding the unnecessary call to
nmi_shootdown_cpus_on_restart().

Note, IRQs are disabled, which prevents KVM from coming along and
enabling virtualization after the fact.

Reviewed-by: Kai Huang
Link: https://lore.kernel.org/r/20230721201859.2307736-8-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kernel/reboot.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 85cb2dfcb67b..98e5db3fd7f4 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -22,7 +22,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -589,7 +588,7 @@ static void emergency_reboot_disable_virtualization(void)
	 * Do the NMI shootdown even if virtualization is off on _this_ CPU, as
	 * other CPUs may have virtualization enabled.
	 */
-	if (cpu_has_vmx() || cpu_has_svm(NULL)) {
+	if (rcu_access_pointer(cpu_emergency_virt_callback)) {
		/* Safely force _this_ CPU out of VMX/SVM operation.
*/ cpu_emergency_disable_virtualization(); -- cgit From 261cd5ed934e6923187cf1c9eaa6cb63f2b81212 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:48 -0700 Subject: x86/reboot: Expose VMCS crash hooks if and only if KVM_{INTEL,AMD} is enabled Expose the crash/reboot hooks used by KVM to disable virtualization in hardware and unblock INIT only if there's a potential in-tree user, i.e. either KVM_INTEL or KVM_AMD is enabled. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-9-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/reboot.h | 4 ++++ arch/x86/kernel/reboot.c | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 74c6a624d166..6536873f8fc0 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -25,10 +25,14 @@ void __noreturn machine_real_restart(unsigned int type); #define MRR_BIOS 0 #define MRR_APM 1 +#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD) typedef void (cpu_emergency_virt_cb)(void); void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback); void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback); void cpu_emergency_disable_virtualization(void); +#else +static inline void cpu_emergency_disable_virtualization(void) {} +#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */ typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); void nmi_shootdown_cpus(nmi_shootdown_cb callback); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 98e5db3fd7f4..830425e6d38e 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -529,6 +529,7 @@ static inline void kb_wait(void) static inline void nmi_shootdown_cpus_on_restart(void); +#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD) /* RCU-protected callback to disable virtualization prior to reboot. */ static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback; @@ -596,7 +597,9 @@ static void emergency_reboot_disable_virtualization(void) nmi_shootdown_cpus_on_restart(); } } - +#else +static void emergency_reboot_disable_virtualization(void) { } +#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */ void __attribute__((weak)) mach_reboot_fixups(void) { -- cgit From b6a6af0d19cea17a3c1e34f81f0d521775f94ab5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:49 -0700 Subject: x86/virt: KVM: Open code cpu_has_vmx() in KVM VMX Fold the raw CPUID check for VMX into kvm_is_vmx_supported(), its sole user. Keep the check even though KVM also checks X86_FEATURE_VMX, as the intent is to provide a unique error message if VMX is unsupported by hardware, whereas X86_FEATURE_VMX may be clear due to firmware and/or kernel actions. No functional change intended. 
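The distinction being preserved, as a sketch (error strings and comments here
are illustrative, not lifted verbatim from the patch):

  /* Raw CPUID: what the silicon supports; CPUID.1:ECX.VMX is bit 5. */
  if (!(cpuid_ecx(1) & feature_bit(VMX)))
          pr_err("VMX not supported by CPU %d\n", cpu);

  /*
   * Cached feature flag: can additionally be cleared by firmware settings
   * or kernel policy, so a failure here warrants a different error message.
   */
  if (!boot_cpu_has(X86_FEATURE_VMX))
          pr_err("VMX disabled on CPU %d\n", cpu);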
Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-10-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/virtext.h | 10 ---------- arch/x86/kvm/vmx/vmx.c | 2 +- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index aaed66249ccf..b1171a5ad452 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -22,14 +22,6 @@ /* * VMX functions: */ - -static inline int cpu_has_vmx(void) -{ - unsigned long ecx = cpuid_ecx(1); - return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ -} - - /** * cpu_vmxoff() - Disable VMX on the current CPU * @@ -61,8 +53,6 @@ static inline int cpu_vmx_enabled(void) } /** Disable VMX if it is enabled on the current CPU - * - * You shouldn't call this if cpu_has_vmx() returns 0. */ static inline void __cpu_emergency_vmxoff(void) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index df991f15baf4..4b9d68826172 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2699,7 +2699,7 @@ static bool kvm_is_vmx_supported(void) { int cpu = raw_smp_processor_id(); - if (!cpu_has_vmx()) { + if (!(cpuid_ecx(1) & feature_bit(VMX))) { pr_err("VMX not supported by CPU %d\n", cpu); return false; } -- cgit From 22e420e127399f2e75c8efddff763e92247f3da7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:50 -0700 Subject: x86/virt: KVM: Move VMXOFF helpers into KVM VMX Now that VMX is disabled in emergencies via the virt callbacks, move the VMXOFF helpers into KVM, the only remaining user. No functional change intended. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-11-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/virtext.h | 42 ------------------------------------------ arch/x86/kvm/vmx/vmx.c | 29 ++++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 45 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index b1171a5ad452..a27801f2bc71 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -19,48 +19,6 @@ #include #include -/* - * VMX functions: - */ -/** - * cpu_vmxoff() - Disable VMX on the current CPU - * - * Disable VMX and clear CR4.VMXE (even if VMXOFF faults) - * - * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to - * atomically track post-VMXON state, e.g. this may be called in NMI context. - * Eat all faults as all other faults on VMXOFF faults are mode related, i.e. - * faults are guaranteed to be due to the !post-VMXON check unless the CPU is - * magically in RM, VM86, compat mode, or at CPL>0. 
- */
-static inline int cpu_vmxoff(void)
-{
-	asm_volatile_goto("1: vmxoff\n\t"
-			  _ASM_EXTABLE(1b, %l[fault])
-			  ::: "cc", "memory" : fault);
-
-	cr4_clear_bits(X86_CR4_VMXE);
-	return 0;
-
-fault:
-	cr4_clear_bits(X86_CR4_VMXE);
-	return -EIO;
-}
-
-static inline int cpu_vmx_enabled(void)
-{
-	return __read_cr4() & X86_CR4_VMXE;
-}
-
-/** Disable VMX if it is enabled on the current CPU
- */
-static inline void __cpu_emergency_vmxoff(void)
-{
-	if (cpu_vmx_enabled())
-		cpu_vmxoff();
-}
-
-
 /*
 * SVM functions:
 */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4b9d68826172..64377fe3a990 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -47,7 +47,6 @@
 #include
 #include
 #include
-#include
 #include

 #include "capabilities.h"
@@ -725,6 +724,29 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
	return ret;
 }

+/*
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
+ */
+static int kvm_cpu_vmxoff(void)
+{
+	asm_volatile_goto("1: vmxoff\n\t"
+			  _ASM_EXTABLE(1b, %l[fault])
+			  ::: "cc", "memory" : fault);
+
+	cr4_clear_bits(X86_CR4_VMXE);
+	return 0;
+
+fault:
+	cr4_clear_bits(X86_CR4_VMXE);
+	return -EIO;
+}
+
 static void vmx_emergency_disable(void)
 {
	int cpu = raw_smp_processor_id();
@@ -734,7 +756,8 @@ static void vmx_emergency_disable(void)
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);

-	__cpu_emergency_vmxoff();
+	if (__read_cr4() & X86_CR4_VMXE)
+		kvm_cpu_vmxoff();
 }

 static void __loaded_vmcs_clear(void *arg)
@@ -2799,7 +2822,7 @@ static void vmx_hardware_disable(void)
 {
	vmclear_local_loaded_vmcss();

-	if (cpu_vmxoff())
+	if (kvm_cpu_vmxoff())
		kvm_spurious_fault();

	hv_reset_evmcs();
-- cgit

From 554856b69e3d1faae6f056f75ed6262b5eb01546 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 21 Jul 2023 13:18:51 -0700
Subject: KVM: SVM: Make KVM_AMD depend on CPU_SUP_AMD or CPU_SUP_HYGON

Make building KVM SVM support depend on support for AMD or Hygon. KVM
already effectively restricts SVM support to AMD and Hygon by virtue of
the vendor string checks in cpu_has_svm(), and KVM VMX support depends
on one of its three known vendors (Intel, Centaur, or Zhaoxin).

Add the CPU_SUP_HYGON clause even though CPU_SUP_HYGON selects
CPU_SUP_AMD to document that KVM SVM support isn't just for AMD CPUs,
and to prevent breakage should Hygon support ever become a standalone
thing.

Link: https://lore.kernel.org/r/20230721201859.2307736-12-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 89ca7f4c1464..66dbd1f4d57d 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -101,7 +101,7 @@ config X86_SGX_KVM

 config KVM_AMD
	tristate "KVM for AMD processors support"
-	depends on KVM
+	depends on KVM && (CPU_SUP_AMD || CPU_SUP_HYGON)
	help
	  Provides support for KVM on AMD processors equipped with the AMD-V
	  (SVM) extensions.
-- cgit From 5df8ecfe3632d5879d1f154f7aa8de441b5d1c89 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:52 -0700 Subject: x86/virt: Drop unnecessary check on extended CPUID level in cpu_has_svm() Drop the explicit check on the extended CPUID level in cpu_has_svm(), the kernel's cached CPUID info will leave the entire SVM leaf unset if said leaf is not supported by hardware. Prior to using cached information, the check was needed to avoid false positives due to Intel's rather crazy CPUID behavior of returning the values of the maximum supported leaf if the specified leaf is unsupported. Fixes: 682a8108872f ("x86/kvm/svm: Simplify cpu_has_svm()") Link: https://lore.kernel.org/r/20230721201859.2307736-13-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/virtext.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index a27801f2bc71..be50c414efe4 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -39,12 +39,6 @@ static inline int cpu_has_svm(const char **msg) return 0; } - if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) { - if (msg) - *msg = "can't execute cpuid_8000000a"; - return 0; - } - if (!boot_cpu_has(X86_FEATURE_SVM)) { if (msg) *msg = "svm not available"; -- cgit From 85fd29dd5fe459b732e46c7a9782fc403c5604ed Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:53 -0700 Subject: x86/virt: KVM: Open code cpu_has_svm() into kvm_is_svm_supported() Fold the guts of cpu_has_svm() into kvm_is_svm_supported(), its sole remaining user. No functional change intended. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-14-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/virtext.h | 28 ---------------------------- arch/x86/kvm/svm/svm.c | 11 ++++++++--- 2 files changed, 8 insertions(+), 31 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index be50c414efe4..632575e257d8 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -22,35 +22,7 @@ /* * SVM functions: */ - -/** Check if the CPU has SVM support - * - * You can use the 'msg' arg to get a message describing the problem, - * if the function returns zero. Simply pass NULL if you are not interested - * on the messages; gcc should take care of not generating code for - * the messages on this case. - */ -static inline int cpu_has_svm(const char **msg) -{ - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && - boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) { - if (msg) - *msg = "not amd or hygon"; - return 0; - } - - if (!boot_cpu_has(X86_FEATURE_SVM)) { - if (msg) - *msg = "svm not available"; - return 0; - } - return 1; -} - - /** Disable SVM on the current CPU - * - * You should call this only if cpu_has_svm() returned true. 
 */
 static inline void cpu_svm_disable(void)
 {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1ae9c2c7eacb..ff6c769aafb2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -521,11 +521,16 @@ static void svm_init_osvw(struct kvm_vcpu *vcpu)
 static bool kvm_is_svm_supported(void)
 {
	int cpu = raw_smp_processor_id();
-	const char *msg;
	u64 vm_cr;

-	if (!cpu_has_svm(&msg)) {
-		pr_err("SVM not supported by CPU %d, %s\n", cpu, msg);
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
+		pr_err("CPU %d isn't AMD or Hygon\n", cpu);
+		return false;
+	}
+
+	if (!boot_cpu_has(X86_FEATURE_SVM)) {
+		pr_err("SVM not supported by CPU %d\n", cpu);
		return false;
	}
-- cgit

From c4db4f20f3bf619bacd3d705bf427016453eedb1 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 21 Jul 2023 13:18:54 -0700
Subject: KVM: SVM: Check that the current CPU supports SVM in kvm_is_svm_supported()

Check "this" CPU instead of the boot CPU when querying SVM support so that
the per-CPU checks done during hardware enabling actually function as
intended, i.e. will detect issues where SVM isn't supported on all CPUs.

Disable migration for the call from svm_init() mostly so that the standard
accessors for the per-CPU data can be used without getting yelled at by
CONFIG_DEBUG_PREEMPT=y sanity checks. Preventing the "disabled by BIOS"
error message from reporting the wrong CPU is largely a bonus, as ensuring
a stable CPU during module load is a non-goal for KVM.

Link: https://lore.kernel.org/all/ZAdxNgv0M6P63odE@google.com
Cc: Kai Huang
Cc: Chao Gao
Reviewed-by: Kai Huang
Link: https://lore.kernel.org/r/20230721201859.2307736-15-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/svm/svm.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ff6c769aafb2..9e449167e71b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -518,18 +518,20 @@ static void svm_init_osvw(struct kvm_vcpu *vcpu)
		vcpu->arch.osvw.status |= 1;
 }

-static bool kvm_is_svm_supported(void)
+static bool __kvm_is_svm_supported(void)
 {
-	int cpu = raw_smp_processor_id();
+	int cpu = smp_processor_id();
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
	u64 vm_cr;

-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
-	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
+	if (c->x86_vendor != X86_VENDOR_AMD &&
+	    c->x86_vendor != X86_VENDOR_HYGON) {
		pr_err("CPU %d isn't AMD or Hygon\n", cpu);
		return false;
	}

-	if (!boot_cpu_has(X86_FEATURE_SVM)) {
+	if (!cpu_has(c, X86_FEATURE_SVM)) {
		pr_err("SVM not supported by CPU %d\n", cpu);
		return false;
	}
@@ -548,9 +550,20 @@ static bool kvm_is_svm_supported(void)
	return true;
 }

+static bool kvm_is_svm_supported(void)
+{
+	bool supported;
+
+	migrate_disable();
+	supported = __kvm_is_svm_supported();
+	migrate_enable();
+
+	return supported;
+}
+
 static int svm_check_processor_compat(void)
 {
-	if (!kvm_is_svm_supported())
+	if (!__kvm_is_svm_supported())
		return -EIO;

	return 0;
-- cgit

From f9a8866040fcf8672f20c67e07e67ba5a00caba4 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 21 Jul 2023 13:18:55 -0700
Subject: KVM: VMX: Ensure CPU is stable when probing basic VMX support

Disable migration when probing VMX support during module load to ensure
the CPU is stable, mostly to match similar SVM logic, where allowing
migration effectively requires deliberately writing buggy code.
As a bonus, KVM won't report the wrong CPU to userspace if VMX is unsupported, but in practice that is a very, very minor bonus as the only way that reporting the wrong CPU would actually matter is if hardware is broken or if the system is misconfigured, i.e. if KVM gets migrated from a CPU that _does_ support VMX to a CPU that does _not_ support VMX. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-16-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 64377fe3a990..ff0eced7d2ab 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2718,9 +2718,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, return 0; } -static bool kvm_is_vmx_supported(void) +static bool __kvm_is_vmx_supported(void) { - int cpu = raw_smp_processor_id(); + int cpu = smp_processor_id(); if (!(cpuid_ecx(1) & feature_bit(VMX))) { pr_err("VMX not supported by CPU %d\n", cpu); @@ -2736,13 +2736,24 @@ static bool kvm_is_vmx_supported(void) return true; } +static bool kvm_is_vmx_supported(void) +{ + bool supported; + + migrate_disable(); + supported = __kvm_is_vmx_supported(); + migrate_enable(); + + return supported; +} + static int vmx_check_processor_compat(void) { int cpu = raw_smp_processor_id(); struct vmcs_config vmcs_conf; struct vmx_capability vmx_cap; - if (!kvm_is_vmx_supported()) + if (!__kvm_is_vmx_supported()) return -EIO; if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) { -- cgit From 76ab8161083bfd0ae4de9b93e68d639da6e1c726 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:56 -0700 Subject: x86/virt: KVM: Move "disable SVM" helper into KVM SVM Move cpu_svm_disable() into KVM proper now that all hardware virtualization management is routed through KVM. Remove the now-empty virtext.h. No functional change intended. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-17-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/virtext.h | 50 ------------------------------------------ arch/x86/kvm/svm/svm.c | 29 ++++++++++++++++++++---- 2 files changed, 25 insertions(+), 54 deletions(-) delete mode 100644 arch/x86/include/asm/virtext.h diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h deleted file mode 100644 index 632575e257d8..000000000000 --- a/arch/x86/include/asm/virtext.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* CPU virtualization extensions handling - * - * This should carry the code for handling CPU virtualization extensions - * that needs to live in the kernel core. - * - * Author: Eduardo Habkost - * - * Copyright (C) 2008, Red Hat Inc. - * - * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc. - */ -#ifndef _ASM_X86_VIRTEX_H -#define _ASM_X86_VIRTEX_H - -#include - -#include -#include -#include - -/* - * SVM functions: - */ -/** Disable SVM on the current CPU - */ -static inline void cpu_svm_disable(void) -{ - uint64_t efer; - - wrmsrl(MSR_VM_HSAVE_PA, 0); - rdmsrl(MSR_EFER, efer); - if (efer & EFER_SVME) { - /* - * Force GIF=1 prior to disabling SVM to ensure INIT and NMI - * aren't blocked, e.g. if a fatal error occurred between CLGI - * and STGI. Note, STGI may #UD if SVM is disabled from NMI - * context between reading EFER and executing STGI. 
In that - * case, GIF must already be set, otherwise the NMI would have - * been blocked, so just eat the fault. - */ - asm_volatile_goto("1: stgi\n\t" - _ASM_EXTABLE(1b, %l[fault]) - ::: "memory" : fault); -fault: - wrmsrl(MSR_EFER, efer & ~EFER_SVME); - } -} - -#endif /* _ASM_X86_VIRTEX_H */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 9e449167e71b..47f9c7156609 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -42,8 +42,6 @@ #include #include -#include - #include #include "trace.h" @@ -582,9 +580,32 @@ out: preempt_enable(); } +static inline void kvm_cpu_svm_disable(void) +{ + uint64_t efer; + + wrmsrl(MSR_VM_HSAVE_PA, 0); + rdmsrl(MSR_EFER, efer); + if (efer & EFER_SVME) { + /* + * Force GIF=1 prior to disabling SVM to ensure INIT and NMI + * aren't blocked, e.g. if a fatal error occurred between CLGI + * and STGI. Note, STGI may #UD if SVM is disabled from NMI + * context between reading EFER and executing STGI. In that + * case, GIF must already be set, otherwise the NMI would have + * been blocked, so just eat the fault. + */ + asm_volatile_goto("1: stgi\n\t" + _ASM_EXTABLE(1b, %l[fault]) + ::: "memory" : fault); +fault: + wrmsrl(MSR_EFER, efer & ~EFER_SVME); + } +} + static void svm_emergency_disable(void) { - cpu_svm_disable(); + kvm_cpu_svm_disable(); } static void svm_hardware_disable(void) @@ -593,7 +614,7 @@ static void svm_hardware_disable(void) if (tsc_scaling) __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT); - cpu_svm_disable(); + kvm_cpu_svm_disable(); amd_pmu_disable_virt(); } -- cgit From 6ae44e012f4c35fb67bc61bd0bf1b3c5f504d931 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:57 -0700 Subject: KVM: x86: Force kvm_rebooting=true during emergency reboot/crash Set kvm_rebooting when virtualization is disabled in an emergency so that KVM eats faults on virtualization instructions even if kvm_reboot() isn't reached. Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230721201859.2307736-18-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 2 ++ arch/x86/kvm/vmx/vmx.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 47f9c7156609..8d1b3c801629 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -605,6 +605,8 @@ fault: static void svm_emergency_disable(void) { + kvm_rebooting = true; + kvm_cpu_svm_disable(); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ff0eced7d2ab..415665c40a39 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -752,6 +752,8 @@ static void vmx_emergency_disable(void) int cpu = raw_smp_processor_id(); struct loaded_vmcs *v; + kvm_rebooting = true; + list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) vmcs_clear(v->vmcs); -- cgit From 2e6b9bd49b702c4a52278e35202354b709021116 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 13:18:58 -0700 Subject: KVM: SVM: Use "standard" stgi() helper when disabling SVM Now that kvm_rebooting is guaranteed to be true prior to disabling SVM in an emergency, use the existing stgi() helper instead of open coding STGI. In effect, eat faults on STGI if and only if kvm_rebooting==true. 
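For reference, the "eat faults iff kvm_rebooting" behavior relied upon here
comes from KVM's existing exception-fixup plumbing, roughly (simplified
sketch based on svm_ops.h and x86.c, not the verbatim source):

  /* Each SVM instruction wrapper, e.g. stgi(), attaches a fault fixup... */
  asm_volatile_goto("1: stgi\n\t"
                    _ASM_EXTABLE(1b, %l[fault])
                    ::: "memory" : fault);
  return;
  fault:
          kvm_spurious_fault();

  /* ...and a faulting instruction is fatal unless KVM is rebooting. */
  void kvm_spurious_fault(void)
  {
          BUG_ON(!kvm_rebooting);
  }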
Link: https://lore.kernel.org/r/20230721201859.2307736-19-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/svm/svm.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8d1b3c801629..4785d780cce3 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -588,17 +588,10 @@ static inline void kvm_cpu_svm_disable(void)
	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME) {
		/*
-		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
-		 * aren't blocked, e.g. if a fatal error occurred between CLGI
-		 * and STGI. Note, STGI may #UD if SVM is disabled from NMI
-		 * context between reading EFER and executing STGI. In that
-		 * case, GIF must already be set, otherwise the NMI would have
-		 * been blocked, so just eat the fault.
+		 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+		 * NMI aren't blocked.
		 */
-		asm_volatile_goto("1: stgi\n\t"
-				  _ASM_EXTABLE(1b, %l[fault])
-				  ::: "memory" : fault);
-fault:
+		stgi();
		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
	}
 }
-- cgit

From a788fbb763b5001f67f2ecdf207a357f9b12838f Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 21 Jul 2023 13:18:59 -0700
Subject: KVM: VMX: Skip VMCLEAR logic during emergency reboots if CR4.VMXE=0

Bail from vmx_emergency_disable() without processing the list of loaded
VMCSes if CR4.VMXE=0, i.e. if the CPU can't be post-VMXON. It should be
impossible for the list to have entries if VMX is already disabled, and
even if that invariant doesn't hold, VMCLEAR will #UD anyways, i.e.
processing the list is pointless even if it somehow isn't empty.

Assuming no existing KVM bugs, this should be a glorified nop. The
primary motivation for the change is to avoid having code that looks
like it does VMCLEAR, but then skips VMXOFF, which is nonsensical.

Suggested-by: Kai Huang
Reviewed-by: Kai Huang
Link: https://lore.kernel.org/r/20230721201859.2307736-20-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmx.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 415665c40a39..83338872a39f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -754,12 +754,20 @@ static void vmx_emergency_disable(void)

	kvm_rebooting = true;

+	/*
+	 * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
+	 * set in task context. If this races with VMX is disabled by an NMI,
+	 * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults due to
+	 * kvm_rebooting set.
+	 */
+	if (!(__read_cr4() & X86_CR4_VMXE))
+		return;
+
	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);

-	if (__read_cr4() & X86_CR4_VMXE)
-		kvm_cpu_vmxoff();
+	kvm_cpu_vmxoff();
 }

 static void __loaded_vmcs_clear(void *arg)
-- cgit

From 99b668545356115e11a6cca519361c8cfd02569e Mon Sep 17 00:00:00 2001
From: Tao Su
Date: Wed, 2 Aug 2023 10:29:54 +0800
Subject: KVM: x86: Advertise AMX-COMPLEX CPUID to userspace

Latest Intel platform GraniteRapids-D introduces AMX-COMPLEX, which adds
two instructions to perform matrix multiplication of two tiles containing
complex elements and accumulate the results into a packed single precision
tile. AMX-COMPLEX is enumerated via CPUID.(EAX=7,ECX=1):EDX[bit 8].

Advertise AMX_COMPLEX if it's supported in hardware. There are no VMX
controls for the feature, i.e.
the instructions can't be intercepted, and KVM advertises base AMX in
CPUID if AMX is supported in hardware, even if KVM doesn't advertise AMX
as being supported in XCR0, e.g. because the process didn't opt-in to
allocating tile data.

Signed-off-by: Tao Su
Reviewed-by: Xiaoyao Li
Link: https://lore.kernel.org/r/20230802022954.193843-1-tao1.su@linux.intel.com
[sean: tweak last paragraph of changelog]
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/cpuid.c | 3 ++-
 arch/x86/kvm/reverse_cpuid.h | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index ad6566636c22..5a88affb2e1a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -647,7 +647,8 @@ void kvm_set_cpu_caps(void)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
-		F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI)
+		F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
+		F(AMX_COMPLEX)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 56cbdb24400a..b81650678375 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -43,6 +43,7 @@ enum kvm_only_cpuid_leafs {
 /* Intel-defined sub-features, CPUID level 0x00000007:1 (EDX) */
 #define X86_FEATURE_AVX_VNNI_INT8 KVM_X86_FEATURE(CPUID_7_1_EDX, 4)
 #define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
+#define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
 #define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)

 /* CPUID level 0x80000007 (EDX). */
-- cgit

From 7cafe9b8e22bb3d77f130c461aedf6868c4aaf58 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 18:15:48 -0700
Subject: KVM: nSVM: Check instead of asserting on nested TSC scaling support

Check for nested TSC scaling support on nested SVM VMRUN instead of
asserting that TSC scaling is exposed to L1 if L1's MSR_AMD64_TSC_RATIO
has diverged from KVM's default. Userspace can trigger the WARN at will
by writing the MSR and then updating guest CPUID to hide the feature
(modifying guest CPUID is allowed anytime before KVM_RUN). E.g. hacking
KVM's state_test selftest to do

  vcpu_set_msr(vcpu, MSR_AMD64_TSC_RATIO, 0);
  vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_TSCRATEMSR);

after restoring state in a new VM+vCPU yields an endless supply of:

  ------------[ cut here ]------------
  WARNING: CPU: 164 PID: 62565 at arch/x86/kvm/svm/nested.c:699
           nested_vmcb02_prepare_control+0x3d6/0x3f0 [kvm_amd]
  Call Trace:
   enter_svm_guest_mode+0x114/0x560 [kvm_amd]
   nested_svm_vmrun+0x260/0x330 [kvm_amd]
   vmrun_interception+0x29/0x30 [kvm_amd]
   svm_invoke_exit_handler+0x35/0x100 [kvm_amd]
   svm_handle_exit+0xe7/0x180 [kvm_amd]
   kvm_arch_vcpu_ioctl_run+0x1eab/0x2570 [kvm]
   kvm_vcpu_ioctl+0x4c9/0x5b0 [kvm]
   __se_sys_ioctl+0x7a/0xc0
   __x64_sys_ioctl+0x21/0x30
   do_syscall_64+0x41/0x90
   entry_SYSCALL_64_after_hwframe+0x63/0xcd
  RIP: 0033:0x45ca1b

Note, the nested #VMEXIT path has the same flaw, but needs a different
fix and will be handled separately.
Fixes: 5228eb96a487 ("KVM: x86: nSVM: implement nested TSC scaling") Cc: Maxim Levitsky Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230729011608.1065019-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/nested.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 96936ddf1b3c..0b90f5cf9df3 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -695,10 +695,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, vmcb02->control.tsc_offset = vcpu->arch.tsc_offset; - if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) { - WARN_ON(!svm->tsc_scaling_enabled); + if (svm->tsc_scaling_enabled && + svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) nested_svm_update_tsc_ratio_msr(vcpu); - } vmcb02->control.int_ctl = (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | -- cgit From 0c94e2468491cbf0754f49a5136ab51294a96b69 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:15:49 -0700 Subject: KVM: nSVM: Load L1's TSC multiplier based on L1 state, not L2 state When emulating nested VM-Exit, load L1's TSC multiplier if L1's desired ratio doesn't match the current ratio, not if the ratio L1 is using for L2 diverges from the default. Functionally, the end result is the same as KVM will run L2 with L1's multiplier if L2's multiplier is the default, i.e. checking that L1's multiplier is loaded is equivalent to checking if L2 has a non-default multiplier. However, the assertion that TSC scaling is exposed to L1 is flawed, as userspace can trigger the WARN at will by writing the MSR and then updating guest CPUID to hide the feature (modifying guest CPUID is allowed anytime before KVM_RUN). E.g. hacking KVM's state_test selftest to do vcpu_set_msr(vcpu, MSR_AMD64_TSC_RATIO, 0); vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_TSCRATEMSR); after restoring state in a new VM+vCPU yields an endless supply of: ------------[ cut here ]------------ WARNING: CPU: 10 PID: 206939 at arch/x86/kvm/svm/nested.c:1105 nested_svm_vmexit+0x6af/0x720 [kvm_amd] Call Trace: nested_svm_exit_handled+0x102/0x1f0 [kvm_amd] svm_handle_exit+0xb9/0x180 [kvm_amd] kvm_arch_vcpu_ioctl_run+0x1eab/0x2570 [kvm] kvm_vcpu_ioctl+0x4c9/0x5b0 [kvm] ? trace_hardirqs_off+0x4d/0xa0 __se_sys_ioctl+0x7a/0xc0 __x64_sys_ioctl+0x21/0x30 do_syscall_64+0x41/0x90 entry_SYSCALL_64_after_hwframe+0x63/0xcd Unlike the nested VMRUN path, hoisting the svm->tsc_scaling_enabled check into the if-statement is wrong as KVM needs to ensure L1's multiplier is loaded in the above scenario. Alternatively, the WARN_ON() could simply be deleted, but that would make KVM's behavior even more subtle, e.g. it's not immediately obvious why it's safe to write MSR_AMD64_TSC_RATIO when checking only tsc_ratio_msr. 
Fixes: 5228eb96a487 ("KVM: x86: nSVM: implement nested TSC scaling") Cc: Maxim Levitsky Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230729011608.1065019-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/nested.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 0b90f5cf9df3..c66c823ae222 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1100,8 +1100,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm) vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS); } - if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) { - WARN_ON(!svm->tsc_scaling_enabled); + if (kvm_caps.has_tsc_control && + vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) { vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); } -- cgit From c0dc39bd2c584879748dfd1321f8541d1cbeb9b8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:15:50 -0700 Subject: KVM: nSVM: Use the "outer" helper for writing multiplier to MSR_AMD64_TSC_RATIO When emulating nested SVM transitions, use the outer helper for writing the TSC multiplier for L2. Using the inner helper only for one-off cases, i.e. for paths where KVM is NOT emulating or modifying vCPU state, will allow for multiple cleanups: - Explicitly disabling preemption only in the outer helper - Getting the multiplier from the vCPU field in the outer helper - Skipping the WRMSR in the outer helper if guest state isn't loaded Opportunistically delete an extra newline. No functional change intended. Link: https://lore.kernel.org/r/20230729011608.1065019-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/nested.c | 4 ++-- arch/x86/kvm/svm/svm.c | 5 ++--- arch/x86/kvm/svm/svm.h | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index c66c823ae222..5d5a1d7832fb 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1103,7 +1103,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) if (kvm_caps.has_tsc_control && vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) { vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; - __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); + svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); } svm->nested.ctl.nested_cr3 = 0; @@ -1536,7 +1536,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu) vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio, svm->tsc_ratio_msr); - __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); + svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); } /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. 
 */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4785d780cce3..7217f7532df9 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -567,7 +567,7 @@ static int svm_check_processor_compat(void)
	return 0;
 }

-void __svm_write_tsc_multiplier(u64 multiplier)
+static void __svm_write_tsc_multiplier(u64 multiplier)
 {
	preempt_disable();

@@ -1150,12 +1150,11 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }

-static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
 {
	__svm_write_tsc_multiplier(multiplier);
 }

-
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
					      struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 18af7e712a5a..7132c0a04817 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -658,7 +658,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
 int nested_svm_exit_special(struct vcpu_svm *svm);
 void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
-void __svm_write_tsc_multiplier(u64 multiplier);
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
-- cgit

From 229725acfaea0dd81f245aec40986dc766de40fa Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 18:15:51 -0700
Subject: KVM: SVM: Clean up preemption toggling related to MSR_AMD64_TSC_RATIO

Explicitly disable preemption when writing MSR_AMD64_TSC_RATIO only in the
"outer" helper, as all direct callers of the "inner" helper now run with
preemption already disabled. And that isn't a coincidence, as the outer
helper requires a vCPU and is intended to be used when modifying guest
state and/or emulating guest instructions, which are typically done with
preemption enabled.

Direct use of the inner helper should be extremely limited, as the only
time KVM should modify MSR_AMD64_TSC_RATIO without a vCPU is when
sanitizing the MSR for a specific pCPU (currently done when {en,dis}abling
SVM). The other direct caller is svm_prepare_switch_to_guest(), which does
have a vCPU, but is a one-off special case: KVM is about to enter the
guest on a specific pCPU and thus must have preemption disabled.
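As a concrete illustration of the hazard being guarded against, the inner
helper does a read-compare-write of a per-CPU cache, which must not be
interleaved with a CPU migration (hypothetical interleaving, not taken from
the patch):

  u64 cached = __this_cpu_read(current_tsc_ratio);  /* runs on CPU X */
  /* ...task is preempted and migrated to CPU Y... */
  wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);          /* writes CPU Y's MSR */
  __this_cpu_write(current_tsc_ratio, multiplier);  /* updates CPU Y's cache
                                                       based on CPU X's state */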
Link: https://lore.kernel.org/r/20230729011608.1065019-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7217f7532df9..b66af2966ec7 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -569,15 +569,11 @@ static int svm_check_processor_compat(void) static void __svm_write_tsc_multiplier(u64 multiplier) { - preempt_disable(); - if (multiplier == __this_cpu_read(current_tsc_ratio)) - goto out; + return; wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); __this_cpu_write(current_tsc_ratio, multiplier); -out: - preempt_enable(); } static inline void kvm_cpu_svm_disable(void) @@ -1152,7 +1148,9 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) { + preempt_disable(); __svm_write_tsc_multiplier(multiplier); + preempt_enable(); } /* Evaluate instruction intercepts that depend on guest CPUID features. */ -- cgit From 2d63699099ac1fad13a0dbf8538faf4127c062e8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:15:52 -0700 Subject: KVM: x86: Always write vCPU's current TSC offset/ratio in vendor hooks Drop the @offset and @multiplier params from the kvm_x86_ops hooks for propagating TSC offsets/multipliers into hardware, and instead have the vendor implementations pull the information directly from the vCPU structure. The respective vCPU fields _must_ be written at the same time in order to maintain consistent state, i.e. it's not random luck that the value passed in by all callers is grabbed from the vCPU. Explicitly grabbing the value from the vCPU field in SVM's implementation in particular will allow for additional cleanup without introducing even more subtle dependencies. Specifically, SVM can skip the WRMSR if guest state isn't loaded, i.e. svm_prepare_switch_to_guest() will load the correct value for the vCPU prior to entering the guest. This also reconciles KVM's handling of related values that are stored in the vCPU, as svm_write_tsc_offset() already assumes/requires the caller to have updated l1_tsc_offset. Link: https://lore.kernel.org/r/20230729011608.1065019-6-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 4 ++-- arch/x86/kvm/svm/nested.c | 4 ++-- arch/x86/kvm/svm/svm.c | 8 ++++---- arch/x86/kvm/svm/svm.h | 2 +- arch/x86/kvm/vmx/vmx.c | 8 ++++---- arch/x86/kvm/x86.c | 5 ++--- 6 files changed, 15 insertions(+), 16 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 28bd38303d70..dad9331c5270 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1654,8 +1654,8 @@ struct kvm_x86_ops { u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu); u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu); - void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); - void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier); + void (*write_tsc_offset)(struct kvm_vcpu *vcpu); + void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu); /* * Retrieve somewhat arbitrary exit information. 
Intended to diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 5d5a1d7832fb..3342cc4a5189 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1103,7 +1103,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) if (kvm_caps.has_tsc_control && vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) { vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; - svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); + svm_write_tsc_multiplier(vcpu); } svm->nested.ctl.nested_cr3 = 0; @@ -1536,7 +1536,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu) vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio, svm->tsc_ratio_msr); - svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); + svm_write_tsc_multiplier(vcpu); } /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index b66af2966ec7..2542066652a3 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1137,19 +1137,19 @@ static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) return svm->tsc_ratio_msr; } -static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) +static void svm_write_tsc_offset(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; - svm->vmcb->control.tsc_offset = offset; + svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset; vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) +void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu) { preempt_disable(); - __svm_write_tsc_multiplier(multiplier); + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); preempt_enable(); } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 7132c0a04817..5829a1801862 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -658,7 +658,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); int nested_svm_exit_special(struct vcpu_svm *svm); void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu); -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier); +void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu); void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, struct vmcb_control_area *control); void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 83338872a39f..3235998ca261 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1898,14 +1898,14 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) return kvm_caps.default_tsc_scaling_ratio; } -static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) +static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu) { - vmcs_write64(TSC_OFFSET, offset); + vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); } -static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) +static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu) { - vmcs_write64(TSC_MULTIPLIER, multiplier); + vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); } /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a1b13d2d1d71..80ec33fc823b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2613,7 +2613,7 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) else vcpu->arch.tsc_offset = l1_offset; - 
static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); + static_call(kvm_x86_write_tsc_offset)(vcpu); } static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
@@ -2629,8 +2629,7 @@ static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multipli vcpu->arch.tsc_scaling_ratio = l1_multiplier; if (kvm_caps.has_tsc_control) - static_call(kvm_x86_write_tsc_multiplier)( - vcpu, vcpu->arch.tsc_scaling_ratio); + static_call(kvm_x86_write_tsc_multiplier)(vcpu); } static inline bool kvm_check_tsc_unstable(void)
-- cgit From 223f93d4d88aedc02bdcc92b3734e76f707812f3 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 28 Jul 2023 18:15:53 -0700
Subject: KVM: nSVM: Skip writes to MSR_AMD64_TSC_RATIO if guest state isn't loaded

Skip writes to MSR_AMD64_TSC_RATIO that are done in the context of a vCPU if guest state isn't loaded, i.e. if KVM will update MSR_AMD64_TSC_RATIO during svm_prepare_switch_to_guest() before entering the guest. Checking guest_state_loaded may or may not be a net positive for performance as the current_tsc_ratio cache will optimize away duplicate WRMSRs in the vast majority of scenarios. However, the cost of the check is negligible, and the real motivation is to document that KVM needs to load the vCPU's value only when running the vCPU.

Link: https://lore.kernel.org/r/20230729011608.1065019-7-seanjc@google.com
Signed-off-by: Sean Christopherson
--- arch/x86/kvm/svm/svm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 2542066652a3..6d8932e05753 100644
--- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c
@@ -1149,7 +1149,8 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu) void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu) { preempt_disable(); - __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); + if (to_svm(vcpu)->guest_state_loaded) + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); preempt_enable(); }
-- cgit From e98b1085be795354073debd371e2889bea4902b9 Mon Sep 17 00:00:00 2001
From: Anup Patel
Date: Tue, 11 Jul 2023 17:40:47 +0530
Subject: RISC-V: KVM: Factor-out ONE_REG related code to its own source file

The VCPU ONE_REG interface has grown over time and it will continue to grow with new ISA extensions and other features. Let us move all ONE_REG related code to its own source file so that vcpu.c focuses only on high-level VCPU functions.
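For reference, vcpu_onereg.c backs the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A rough userspace sketch of reading one config register (illustrative only; assumes an rv64 host, hence KVM_REG_SIZE_U64, and an already-created vCPU fd named vcpu_fd):

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        unsigned long isa_bitmap;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                        KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
                .addr = (unsigned long)&isa_bitmap,
        };

        /* On success, isa_bitmap holds the vCPU's base (single letter) ISA. */
        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                perror("KVM_GET_ONE_REG");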
Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/include/asm/kvm_host.h | 6 + arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu.c | 529 +----------------------------------- arch/riscv/kvm/vcpu_onereg.c | 548 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 556 insertions(+), 528 deletions(-) create mode 100644 arch/riscv/kvm/vcpu_onereg.c diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 2d8ee53b66c7..55bc7bdbff48 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -337,6 +337,12 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch); +void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); + int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu); diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index fee0671e2dc1..4c2067fc59fc 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -19,6 +19,7 @@ kvm-y += vcpu_exit.o kvm-y += vcpu_fp.o kvm-y += vcpu_vector.o kvm-y += vcpu_insn.o +kvm-y += vcpu_onereg.o kvm-y += vcpu_switch.o kvm-y += vcpu_sbi.o kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index d12ef99901fc..452d6548e951 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -13,16 +13,12 @@ #include #include #include -#include #include #include #include #include #include #include -#include -#include -#include #include const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { @@ -46,79 +42,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; -#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0) - -#define KVM_ISA_EXT_ARR(ext) [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext - -/* Mapping between KVM ISA Extension ID & Host ISA extension ID */ -static const unsigned long kvm_isa_ext_arr[] = { - [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a, - [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c, - [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d, - [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f, - [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h, - [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i, - [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, - [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v, - - KVM_ISA_EXT_ARR(SSAIA), - KVM_ISA_EXT_ARR(SSTC), - KVM_ISA_EXT_ARR(SVINVAL), - KVM_ISA_EXT_ARR(SVNAPOT), - KVM_ISA_EXT_ARR(SVPBMT), - KVM_ISA_EXT_ARR(ZBB), - KVM_ISA_EXT_ARR(ZIHINTPAUSE), - KVM_ISA_EXT_ARR(ZICBOM), - KVM_ISA_EXT_ARR(ZICBOZ), -}; - -static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) -{ - unsigned long i; - - for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) { - if (kvm_isa_ext_arr[i] == base_ext) - return i; - } - - return KVM_RISCV_ISA_EXT_MAX; -} - -static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext) -{ - switch (ext) { - case KVM_RISCV_ISA_EXT_H: - return false; - case KVM_RISCV_ISA_EXT_V: - return riscv_v_vstate_ctrl_user_allowed(); - default: - break; - } - - return true; -} - -static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) -{ - switch (ext) { - case KVM_RISCV_ISA_EXT_A: - case KVM_RISCV_ISA_EXT_C: - case KVM_RISCV_ISA_EXT_I: - case 
KVM_RISCV_ISA_EXT_M: - case KVM_RISCV_ISA_EXT_SSAIA: - case KVM_RISCV_ISA_EXT_SSTC: - case KVM_RISCV_ISA_EXT_SVINVAL: - case KVM_RISCV_ISA_EXT_SVNAPOT: - case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: - case KVM_RISCV_ISA_EXT_ZBB: - return false; - default: - break; - } - - return true; -} - static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) { struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; @@ -176,7 +99,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) int rc; struct kvm_cpu_context *cntx; struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; - unsigned long host_isa, i; /* Mark this VCPU never ran */ vcpu->arch.ran_atleast_once = false; @@ -184,12 +106,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); /* Setup ISA features available to VCPU */ - for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) { - host_isa = kvm_isa_ext_arr[i]; - if (__riscv_isa_extension_available(NULL, host_isa) && - kvm_riscv_vcpu_isa_enable_allowed(i)) - set_bit(host_isa, vcpu->arch.isa); - } + kvm_riscv_vcpu_setup_isa(vcpu); /* Setup vendor, arch, and implementation details */ vcpu->arch.mvendorid = sbi_get_mvendorid(); @@ -294,450 +211,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_CONFIG); - unsigned long reg_val; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - - switch (reg_num) { - case KVM_REG_RISCV_CONFIG_REG(isa): - reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK; - break; - case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): - if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) - return -EINVAL; - reg_val = riscv_cbom_block_size; - break; - case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): - if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) - return -EINVAL; - reg_val = riscv_cboz_block_size; - break; - case KVM_REG_RISCV_CONFIG_REG(mvendorid): - reg_val = vcpu->arch.mvendorid; - break; - case KVM_REG_RISCV_CONFIG_REG(marchid): - reg_val = vcpu->arch.marchid; - break; - case KVM_REG_RISCV_CONFIG_REG(mimpid): - reg_val = vcpu->arch.mimpid; - break; - default: - return -EINVAL; - } - - if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - return 0; -} - -static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_CONFIG); - unsigned long i, isa_ext, reg_val; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - - if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - switch (reg_num) { - case KVM_REG_RISCV_CONFIG_REG(isa): - /* - * This ONE REG interface is only defined for - * single letter extensions. 
- */ - if (fls(reg_val) >= RISCV_ISA_EXT_BASE) - return -EINVAL; - - if (!vcpu->arch.ran_atleast_once) { - /* Ignore the enable/disable request for certain extensions */ - for (i = 0; i < RISCV_ISA_EXT_BASE; i++) { - isa_ext = kvm_riscv_vcpu_base2isa_ext(i); - if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) { - reg_val &= ~BIT(i); - continue; - } - if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext)) - if (reg_val & BIT(i)) - reg_val &= ~BIT(i); - if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext)) - if (!(reg_val & BIT(i))) - reg_val |= BIT(i); - } - reg_val &= riscv_isa_extension_base(NULL); - /* Do not modify anything beyond single letter extensions */ - reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) | - (reg_val & KVM_RISCV_BASE_ISA_MASK); - vcpu->arch.isa[0] = reg_val; - kvm_riscv_vcpu_fp_reset(vcpu); - } else { - return -EOPNOTSUPP; - } - break; - case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): - return -EOPNOTSUPP; - case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): - return -EOPNOTSUPP; - case KVM_REG_RISCV_CONFIG_REG(mvendorid): - if (!vcpu->arch.ran_atleast_once) - vcpu->arch.mvendorid = reg_val; - else - return -EBUSY; - break; - case KVM_REG_RISCV_CONFIG_REG(marchid): - if (!vcpu->arch.ran_atleast_once) - vcpu->arch.marchid = reg_val; - else - return -EBUSY; - break; - case KVM_REG_RISCV_CONFIG_REG(mimpid): - if (!vcpu->arch.ran_atleast_once) - vcpu->arch.mimpid = reg_val; - else - return -EBUSY; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_CORE); - unsigned long reg_val; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) - return -EINVAL; - - if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) - reg_val = cntx->sepc; - else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num && - reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6)) - reg_val = ((unsigned long *)cntx)[reg_num]; - else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) - reg_val = (cntx->sstatus & SR_SPP) ? 
- KVM_RISCV_MODE_S : KVM_RISCV_MODE_U; - else - return -EINVAL; - - if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - return 0; -} - -static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_CORE); - unsigned long reg_val; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) - return -EINVAL; - - if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) - cntx->sepc = reg_val; - else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num && - reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6)) - ((unsigned long *)cntx)[reg_num] = reg_val; - else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) { - if (reg_val == KVM_RISCV_MODE_S) - cntx->sstatus |= SR_SPP; - else - cntx->sstatus &= ~SR_SPP; - } else - return -EINVAL; - - return 0; -} - -static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu, - unsigned long reg_num, - unsigned long *out_val) -{ - struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; - - if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) - return -EINVAL; - - if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { - kvm_riscv_vcpu_flush_interrupts(vcpu); - *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK; - *out_val |= csr->hvip & ~IRQ_LOCAL_MASK; - } else - *out_val = ((unsigned long *)csr)[reg_num]; - - return 0; -} - -static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu, - unsigned long reg_num, - unsigned long reg_val) -{ - struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; - - if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) - return -EINVAL; - - if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { - reg_val &= VSIP_VALID_MASK; - reg_val <<= VSIP_TO_HVIP_SHIFT; - } - - ((unsigned long *)csr)[reg_num] = reg_val; - - if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) - WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0); - - return 0; -} - -static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - int rc; - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_CSR); - unsigned long reg_val, reg_subtype; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - - reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; - reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; - switch (reg_subtype) { - case KVM_REG_RISCV_CSR_GENERAL: - rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, ®_val); - break; - case KVM_REG_RISCV_CSR_AIA: - rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val); - break; - default: - rc = -EINVAL; - break; - } - if (rc) - return rc; - - if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - return 0; -} - -static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - int rc; - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_CSR); - unsigned long reg_val, reg_subtype; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned 
long)) - return -EINVAL; - - if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; - reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; - switch (reg_subtype) { - case KVM_REG_RISCV_CSR_GENERAL: - rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val); - break; - case KVM_REG_RISCV_CSR_AIA: - rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val); - break; - default: - rc = -EINVAL; - break; - } - if (rc) - return rc; - - return 0; -} - -static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_ISA_EXT); - unsigned long reg_val = 0; - unsigned long host_isa_ext; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - - if (reg_num >= KVM_RISCV_ISA_EXT_MAX || - reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) - return -EINVAL; - - host_isa_ext = kvm_isa_ext_arr[reg_num]; - if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext)) - reg_val = 1; /* Mark the given extension as available */ - - if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - return 0; -} - -static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_ISA_EXT); - unsigned long reg_val; - unsigned long host_isa_ext; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - - if (reg_num >= KVM_RISCV_ISA_EXT_MAX || - reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) - return -EINVAL; - - if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) - return -EFAULT; - - host_isa_ext = kvm_isa_ext_arr[reg_num]; - if (!__riscv_isa_extension_available(NULL, host_isa_ext)) - return -EOPNOTSUPP; - - if (!vcpu->arch.ran_atleast_once) { - /* - * All multi-letter extension and a few single letter - * extension can be disabled - */ - if (reg_val == 1 && - kvm_riscv_vcpu_isa_enable_allowed(reg_num)) - set_bit(host_isa_ext, vcpu->arch.isa); - else if (!reg_val && - kvm_riscv_vcpu_isa_disable_allowed(reg_num)) - clear_bit(host_isa_ext, vcpu->arch.isa); - else - return -EINVAL; - kvm_riscv_vcpu_fp_reset(vcpu); - } else { - return -EOPNOTSUPP; - } - - return 0; -} - -static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) -{ - switch (reg->id & KVM_REG_RISCV_TYPE_MASK) { - case KVM_REG_RISCV_CONFIG: - return kvm_riscv_vcpu_set_reg_config(vcpu, reg); - case KVM_REG_RISCV_CORE: - return kvm_riscv_vcpu_set_reg_core(vcpu, reg); - case KVM_REG_RISCV_CSR: - return kvm_riscv_vcpu_set_reg_csr(vcpu, reg); - case KVM_REG_RISCV_TIMER: - return kvm_riscv_vcpu_set_reg_timer(vcpu, reg); - case KVM_REG_RISCV_FP_F: - return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, - KVM_REG_RISCV_FP_F); - case KVM_REG_RISCV_FP_D: - return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, - KVM_REG_RISCV_FP_D); - case KVM_REG_RISCV_ISA_EXT: - return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg); - case KVM_REG_RISCV_SBI_EXT: - return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg); - case KVM_REG_RISCV_VECTOR: - return kvm_riscv_vcpu_set_reg_vector(vcpu, reg, - KVM_REG_RISCV_VECTOR); - default: - break; - } - - return -EINVAL; -} - -static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, - const struct 
kvm_one_reg *reg) -{ - switch (reg->id & KVM_REG_RISCV_TYPE_MASK) { - case KVM_REG_RISCV_CONFIG: - return kvm_riscv_vcpu_get_reg_config(vcpu, reg); - case KVM_REG_RISCV_CORE: - return kvm_riscv_vcpu_get_reg_core(vcpu, reg); - case KVM_REG_RISCV_CSR: - return kvm_riscv_vcpu_get_reg_csr(vcpu, reg); - case KVM_REG_RISCV_TIMER: - return kvm_riscv_vcpu_get_reg_timer(vcpu, reg); - case KVM_REG_RISCV_FP_F: - return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, - KVM_REG_RISCV_FP_F); - case KVM_REG_RISCV_FP_D: - return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, - KVM_REG_RISCV_FP_D); - case KVM_REG_RISCV_ISA_EXT: - return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg); - case KVM_REG_RISCV_SBI_EXT: - return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg); - case KVM_REG_RISCV_VECTOR: - return kvm_riscv_vcpu_get_reg_vector(vcpu, reg, - KVM_REG_RISCV_VECTOR); - default: - break; - } - - return -EINVAL; -} - long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c new file mode 100644 index 000000000000..3efa2a77d2d9 --- /dev/null +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * Copyright (C) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0) + +#define KVM_ISA_EXT_ARR(ext) \ +[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext + +/* Mapping between KVM ISA Extension ID & Host ISA extension ID */ +static const unsigned long kvm_isa_ext_arr[] = { + [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a, + [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c, + [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d, + [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f, + [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h, + [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i, + [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, + [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v, + + KVM_ISA_EXT_ARR(SSAIA), + KVM_ISA_EXT_ARR(SSTC), + KVM_ISA_EXT_ARR(SVINVAL), + KVM_ISA_EXT_ARR(SVNAPOT), + KVM_ISA_EXT_ARR(SVPBMT), + KVM_ISA_EXT_ARR(ZBB), + KVM_ISA_EXT_ARR(ZIHINTPAUSE), + KVM_ISA_EXT_ARR(ZICBOM), + KVM_ISA_EXT_ARR(ZICBOZ), +}; + +static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) +{ + unsigned long i; + + for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) { + if (kvm_isa_ext_arr[i] == base_ext) + return i; + } + + return KVM_RISCV_ISA_EXT_MAX; +} + +static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext) +{ + switch (ext) { + case KVM_RISCV_ISA_EXT_H: + return false; + case KVM_RISCV_ISA_EXT_V: + return riscv_v_vstate_ctrl_user_allowed(); + default: + break; + } + + return true; +} + +static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) +{ + switch (ext) { + case KVM_RISCV_ISA_EXT_A: + case KVM_RISCV_ISA_EXT_C: + case KVM_RISCV_ISA_EXT_I: + case KVM_RISCV_ISA_EXT_M: + case KVM_RISCV_ISA_EXT_SSAIA: + case KVM_RISCV_ISA_EXT_SSTC: + case KVM_RISCV_ISA_EXT_SVINVAL: + case KVM_RISCV_ISA_EXT_SVNAPOT: + case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: + case KVM_RISCV_ISA_EXT_ZBB: + return false; + default: + break; + } + + return true; +} + +void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu) +{ + unsigned long host_isa, i; + + for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) { + host_isa = kvm_isa_ext_arr[i]; + if (__riscv_isa_extension_available(NULL, host_isa) && + kvm_riscv_vcpu_isa_enable_allowed(i)) + set_bit(host_isa, 
vcpu->arch.isa); + } +} + +static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CONFIG); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + switch (reg_num) { + case KVM_REG_RISCV_CONFIG_REG(isa): + reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK; + break; + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): + if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) + return -EINVAL; + reg_val = riscv_cbom_block_size; + break; + case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): + if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) + return -EINVAL; + reg_val = riscv_cboz_block_size; + break; + case KVM_REG_RISCV_CONFIG_REG(mvendorid): + reg_val = vcpu->arch.mvendorid; + break; + case KVM_REG_RISCV_CONFIG_REG(marchid): + reg_val = vcpu->arch.marchid; + break; + case KVM_REG_RISCV_CONFIG_REG(mimpid): + reg_val = vcpu->arch.mimpid; + break; + default: + return -EINVAL; + } + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CONFIG); + unsigned long i, isa_ext, reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + switch (reg_num) { + case KVM_REG_RISCV_CONFIG_REG(isa): + /* + * This ONE REG interface is only defined for + * single letter extensions. 
+ */ + if (fls(reg_val) >= RISCV_ISA_EXT_BASE) + return -EINVAL; + + if (!vcpu->arch.ran_atleast_once) { + /* Ignore the enable/disable request for certain extensions */ + for (i = 0; i < RISCV_ISA_EXT_BASE; i++) { + isa_ext = kvm_riscv_vcpu_base2isa_ext(i); + if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) { + reg_val &= ~BIT(i); + continue; + } + if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext)) + if (reg_val & BIT(i)) + reg_val &= ~BIT(i); + if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext)) + if (!(reg_val & BIT(i))) + reg_val |= BIT(i); + } + reg_val &= riscv_isa_extension_base(NULL); + /* Do not modify anything beyond single letter extensions */ + reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) | + (reg_val & KVM_RISCV_BASE_ISA_MASK); + vcpu->arch.isa[0] = reg_val; + kvm_riscv_vcpu_fp_reset(vcpu); + } else { + return -EOPNOTSUPP; + } + break; + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): + return -EOPNOTSUPP; + case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): + return -EOPNOTSUPP; + case KVM_REG_RISCV_CONFIG_REG(mvendorid): + if (!vcpu->arch.ran_atleast_once) + vcpu->arch.mvendorid = reg_val; + else + return -EBUSY; + break; + case KVM_REG_RISCV_CONFIG_REG(marchid): + if (!vcpu->arch.ran_atleast_once) + vcpu->arch.marchid = reg_val; + else + return -EBUSY; + break; + case KVM_REG_RISCV_CONFIG_REG(mimpid): + if (!vcpu->arch.ran_atleast_once) + vcpu->arch.mimpid = reg_val; + else + return -EBUSY; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CORE); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) + return -EINVAL; + + if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) + reg_val = cntx->sepc; + else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num && + reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6)) + reg_val = ((unsigned long *)cntx)[reg_num]; + else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) + reg_val = (cntx->sstatus & SR_SPP) ? 
+ KVM_RISCV_MODE_S : KVM_RISCV_MODE_U; + else + return -EINVAL; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CORE); + unsigned long reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) + cntx->sepc = reg_val; + else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num && + reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6)) + ((unsigned long *)cntx)[reg_num] = reg_val; + else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) { + if (reg_val == KVM_RISCV_MODE_S) + cntx->sstatus |= SR_SPP; + else + cntx->sstatus &= ~SR_SPP; + } else + return -EINVAL; + + return 0; +} + +static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long *out_val) +{ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + + if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) + return -EINVAL; + + if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { + kvm_riscv_vcpu_flush_interrupts(vcpu); + *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK; + *out_val |= csr->hvip & ~IRQ_LOCAL_MASK; + } else + *out_val = ((unsigned long *)csr)[reg_num]; + + return 0; +} + +static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long reg_val) +{ + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + + if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) + return -EINVAL; + + if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { + reg_val &= VSIP_VALID_MASK; + reg_val <<= VSIP_TO_HVIP_SHIFT; + } + + ((unsigned long *)csr)[reg_num] = reg_val; + + if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) + WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0); + + return 0; +} + +static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + int rc; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CSR); + unsigned long reg_val, reg_subtype; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; + switch (reg_subtype) { + case KVM_REG_RISCV_CSR_GENERAL: + rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, ®_val); + break; + case KVM_REG_RISCV_CSR_AIA: + rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val); + break; + default: + rc = -EINVAL; + break; + } + if (rc) + return rc; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + int rc; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_CSR); + unsigned long reg_val, reg_subtype; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + 
return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; + switch (reg_subtype) { + case KVM_REG_RISCV_CSR_GENERAL: + rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val); + break; + case KVM_REG_RISCV_CSR_AIA: + rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val); + break; + default: + rc = -EINVAL; + break; + } + if (rc) + return rc; + + return 0; +} + +static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_ISA_EXT); + unsigned long reg_val = 0; + unsigned long host_isa_ext; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + if (reg_num >= KVM_RISCV_ISA_EXT_MAX || + reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) + return -EINVAL; + + host_isa_ext = kvm_isa_ext_arr[reg_num]; + if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext)) + reg_val = 1; /* Mark the given extension as available */ + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_ISA_EXT); + unsigned long reg_val; + unsigned long host_isa_ext; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + if (reg_num >= KVM_RISCV_ISA_EXT_MAX || + reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + host_isa_ext = kvm_isa_ext_arr[reg_num]; + if (!__riscv_isa_extension_available(NULL, host_isa_ext)) + return -EOPNOTSUPP; + + if (!vcpu->arch.ran_atleast_once) { + /* + * All multi-letter extension and a few single letter + * extension can be disabled + */ + if (reg_val == 1 && + kvm_riscv_vcpu_isa_enable_allowed(reg_num)) + set_bit(host_isa_ext, vcpu->arch.isa); + else if (!reg_val && + kvm_riscv_vcpu_isa_disable_allowed(reg_num)) + clear_bit(host_isa_ext, vcpu->arch.isa); + else + return -EINVAL; + kvm_riscv_vcpu_fp_reset(vcpu); + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + switch (reg->id & KVM_REG_RISCV_TYPE_MASK) { + case KVM_REG_RISCV_CONFIG: + return kvm_riscv_vcpu_set_reg_config(vcpu, reg); + case KVM_REG_RISCV_CORE: + return kvm_riscv_vcpu_set_reg_core(vcpu, reg); + case KVM_REG_RISCV_CSR: + return kvm_riscv_vcpu_set_reg_csr(vcpu, reg); + case KVM_REG_RISCV_TIMER: + return kvm_riscv_vcpu_set_reg_timer(vcpu, reg); + case KVM_REG_RISCV_FP_F: + return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_F); + case KVM_REG_RISCV_FP_D: + return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_D); + case KVM_REG_RISCV_ISA_EXT: + return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg); + case KVM_REG_RISCV_SBI_EXT: + return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg); + case KVM_REG_RISCV_VECTOR: + return kvm_riscv_vcpu_set_reg_vector(vcpu, reg, + KVM_REG_RISCV_VECTOR); + default: + break; + } + + return -EINVAL; +} + +int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + switch 
(reg->id & KVM_REG_RISCV_TYPE_MASK) { + case KVM_REG_RISCV_CONFIG: + return kvm_riscv_vcpu_get_reg_config(vcpu, reg); + case KVM_REG_RISCV_CORE: + return kvm_riscv_vcpu_get_reg_core(vcpu, reg); + case KVM_REG_RISCV_CSR: + return kvm_riscv_vcpu_get_reg_csr(vcpu, reg); + case KVM_REG_RISCV_TIMER: + return kvm_riscv_vcpu_get_reg_timer(vcpu, reg); + case KVM_REG_RISCV_FP_F: + return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_F); + case KVM_REG_RISCV_FP_D: + return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, + KVM_REG_RISCV_FP_D); + case KVM_REG_RISCV_ISA_EXT: + return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg); + case KVM_REG_RISCV_SBI_EXT: + return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg); + case KVM_REG_RISCV_VECTOR: + return kvm_riscv_vcpu_get_reg_vector(vcpu, reg, + KVM_REG_RISCV_VECTOR); + default: + break; + } + + return -EINVAL; +} -- cgit From 613029442a4b837d3f8ce8ab08064f095ecdbdb6 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 11 Jul 2023 22:11:16 +0530 Subject: RISC-V: KVM: Extend ONE_REG to enable/disable multiple ISA extensions Currently, the ISA extension ONE_REG interface only allows enabling or disabling one extension at a time. To improve this, we extend the ISA extension ONE_REG interface (similar to SBI extension ONE_REG interface) so that KVM user space can enable/disable multiple extensions in one ioctl. Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/include/uapi/asm/kvm.h | 9 +++ arch/riscv/kvm/vcpu_onereg.c | 153 ++++++++++++++++++++++++++++++-------- 2 files changed, 133 insertions(+), 29 deletions(-) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 930fdc4101cd..6c2285f86545 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -193,6 +193,15 @@ enum KVM_RISCV_SBI_EXT_ID { /* ISA Extension registers are mapped as type 7 */ #define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_ISA_SINGLE (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_ISA_MULTI_EN (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_ISA_MULTI_DIS (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_ISA_MULTI_REG(__ext_id) \ + ((__ext_id) / __BITS_PER_LONG) +#define KVM_REG_RISCV_ISA_MULTI_MASK(__ext_id) \ + (1UL << ((__ext_id) % __BITS_PER_LONG)) +#define KVM_REG_RISCV_ISA_MULTI_REG_LAST \ + KVM_REG_RISCV_ISA_MULTI_REG(KVM_RISCV_ISA_EXT_MAX - 1) /* SBI extension registers are mapped as type 8 */ #define KVM_REG_RISCV_SBI_EXT (0x08 << KVM_REG_RISCV_TYPE_SHIFT) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 3efa2a77d2d9..c57c7fa260c2 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -409,55 +409,34 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, return 0; } -static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) +static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long *reg_val) { - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_ISA_EXT); - unsigned long reg_val = 0; unsigned long host_isa_ext; - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) return -EINVAL; + *reg_val = 0; host_isa_ext = kvm_isa_ext_arr[reg_num]; if 
(__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext)) - reg_val = 1; /* Mark the given extension as available */ - - if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) - return -EFAULT; + *reg_val = 1; /* Mark the given extension as available */ return 0; } -static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) +static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long reg_val) { - unsigned long __user *uaddr = - (unsigned long __user *)(unsigned long)reg->addr; - unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | - KVM_REG_SIZE_MASK | - KVM_REG_RISCV_ISA_EXT); - unsigned long reg_val; unsigned long host_isa_ext; - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) - return -EINVAL; - if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) return -EINVAL; - if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) - return -EFAULT; - host_isa_ext = kvm_isa_ext_arr[reg_num]; if (!__riscv_isa_extension_available(NULL, host_isa_ext)) return -EOPNOTSUPP; @@ -483,6 +462,122 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, return 0; } +static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long *reg_val) +{ + unsigned long i, ext_id, ext_val; + + if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST) + return -EINVAL; + + for (i = 0; i < BITS_PER_LONG; i++) { + ext_id = i + reg_num * BITS_PER_LONG; + if (ext_id >= KVM_RISCV_ISA_EXT_MAX) + break; + + ext_val = 0; + riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val); + if (ext_val) + *reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id); + } + + return 0; +} + +static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long reg_val, bool enable) +{ + unsigned long i, ext_id; + + if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST) + return -EINVAL; + + for_each_set_bit(i, ®_val, BITS_PER_LONG) { + ext_id = i + reg_num * BITS_PER_LONG; + if (ext_id >= KVM_RISCV_ISA_EXT_MAX) + break; + + riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable); + } + + return 0; +} + +static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + int rc; + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_ISA_EXT); + unsigned long reg_val, reg_subtype; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; + + reg_val = 0; + switch (reg_subtype) { + case KVM_REG_RISCV_ISA_SINGLE: + rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, ®_val); + break; + case KVM_REG_RISCV_ISA_MULTI_EN: + case KVM_REG_RISCV_ISA_MULTI_DIS: + rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, ®_val); + if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS) + reg_val = ~reg_val; + break; + default: + rc = -EINVAL; + } + if (rc) + return rc; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_ISA_EXT); + unsigned long reg_val, reg_subtype; + + if (KVM_REG_SIZE(reg->id) != 
sizeof(unsigned long)) + return -EINVAL; + + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + switch (reg_subtype) { + case KVM_REG_RISCV_ISA_SINGLE: + return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val); + case KVM_REG_RISCV_SBI_MULTI_EN: + return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true); + case KVM_REG_RISCV_SBI_MULTI_DIS: + return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false); + default: + return -EINVAL; + } + + return 0; +} + int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { -- cgit From 41716861e4251b202809fc57deaed3881934e062 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Wed, 12 Jul 2023 12:38:11 +0530 Subject: RISC-V: KVM: Allow Zba and Zbs extensions for Guest/VM We extend the KVM ISA extension ONE_REG interface to allow KVM user space to detect and enable Zba and Zbs extensions for Guest/VM. Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/include/uapi/asm/kvm.h | 2 ++ arch/riscv/kvm/vcpu_onereg.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 6c2285f86545..68f929d88f43 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -124,6 +124,8 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_SSAIA, KVM_RISCV_ISA_EXT_V, KVM_RISCV_ISA_EXT_SVNAPOT, + KVM_RISCV_ISA_EXT_ZBA, + KVM_RISCV_ISA_EXT_ZBS, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index c57c7fa260c2..4389ae050d33 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -38,7 +38,9 @@ static const unsigned long kvm_isa_ext_arr[] = { KVM_ISA_EXT_ARR(SVINVAL), KVM_ISA_EXT_ARR(SVNAPOT), KVM_ISA_EXT_ARR(SVPBMT), + KVM_ISA_EXT_ARR(ZBA), KVM_ISA_EXT_ARR(ZBB), + KVM_ISA_EXT_ARR(ZBS), KVM_ISA_EXT_ARR(ZIHINTPAUSE), KVM_ISA_EXT_ARR(ZICBOM), KVM_ISA_EXT_ARR(ZICBOZ), @@ -82,7 +84,9 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_SVINVAL: case KVM_RISCV_ISA_EXT_SVNAPOT: case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: + case KVM_RISCV_ISA_EXT_ZBA: case KVM_RISCV_ISA_EXT_ZBB: + case KVM_RISCV_ISA_EXT_ZBS: return false; default: break; -- cgit From 043cba064ecd2c3ccaf575c15caf6dd829c64e7b Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Wed, 12 Jul 2023 12:53:50 +0530 Subject: RISC-V: KVM: Allow Zicntr, Zicsr, Zifencei, and Zihpm for Guest/VM We extend the KVM ISA extension ONE_REG interface to allow KVM user space to detect and enable Zicntr, Zicsr, Zifencei, and Zihpm extensions for Guest/VM. 
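Like the other ISA extension registers, these can be toggled one at a time via KVM_REG_RISCV_ISA_SINGLE, or in bulk via the multi registers added by the earlier multiple-extension patch in this series. A rough sketch of enabling Zicntr and Zifencei with one KVM_SET_ONE_REG call (illustrative only; vcpu_fd is assumed, and on rv64 both extension IDs fall into multi register 0):

        unsigned long mask = KVM_REG_RISCV_ISA_MULTI_MASK(KVM_RISCV_ISA_EXT_ZICNTR) |
                             KVM_REG_RISCV_ISA_MULTI_MASK(KVM_RISCV_ISA_EXT_ZIFENCEI);
        struct kvm_one_reg reg = {
                .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                        KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_MULTI_EN |
                        KVM_REG_RISCV_ISA_MULTI_REG(KVM_RISCV_ISA_EXT_ZICNTR),
                .addr = (unsigned long)&mask,
        };

        /* Both bits land in multi register 0, so one ioctl enables both. */
        ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);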
Signed-off-by: Anup Patel
Reviewed-by: Andrew Jones
Signed-off-by: Anup Patel
--- arch/riscv/include/uapi/asm/kvm.h | 4 ++++ arch/riscv/kvm/vcpu_onereg.c | 8 ++++++++ 2 files changed, 12 insertions(+)
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 68f929d88f43..9c35e1427f73 100644
--- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -126,6 +126,10 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_SVNAPOT, KVM_RISCV_ISA_EXT_ZBA, KVM_RISCV_ISA_EXT_ZBS, + KVM_RISCV_ISA_EXT_ZICNTR, + KVM_RISCV_ISA_EXT_ZICSR, + KVM_RISCV_ISA_EXT_ZIFENCEI, + KVM_RISCV_ISA_EXT_ZIHPM, KVM_RISCV_ISA_EXT_MAX, };
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 4389ae050d33..0f75b6b3ced1 100644
--- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -41,7 +41,11 @@ static const unsigned long kvm_isa_ext_arr[] = { KVM_ISA_EXT_ARR(ZBA), KVM_ISA_EXT_ARR(ZBB), KVM_ISA_EXT_ARR(ZBS), + KVM_ISA_EXT_ARR(ZICNTR), + KVM_ISA_EXT_ARR(ZICSR), + KVM_ISA_EXT_ARR(ZIFENCEI), KVM_ISA_EXT_ARR(ZIHINTPAUSE), + KVM_ISA_EXT_ARR(ZIHPM), KVM_ISA_EXT_ARR(ZICBOM), KVM_ISA_EXT_ARR(ZICBOZ), };
@@ -83,7 +87,11 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_SSTC: case KVM_RISCV_ISA_EXT_SVINVAL: case KVM_RISCV_ISA_EXT_SVNAPOT: + case KVM_RISCV_ISA_EXT_ZICNTR: + case KVM_RISCV_ISA_EXT_ZICSR: + case KVM_RISCV_ISA_EXT_ZIFENCEI: case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: + case KVM_RISCV_ISA_EXT_ZIHPM: case KVM_RISCV_ISA_EXT_ZBA: case KVM_RISCV_ISA_EXT_ZBB: case KVM_RISCV_ISA_EXT_ZBS:
-- cgit From d2064d4a6e3a565879da196947fb35cc7b1f3256 Mon Sep 17 00:00:00 2001
From: Anup Patel
Date: Wed, 12 Jul 2023 15:33:12 +0530
Subject: RISC-V: KVM: Sort ISA extensions alphabetically in ONE_REG interface

Let us sort ISA extensions alphabetically in kvm_isa_ext_arr[] and kvm_riscv_vcpu_isa_disable_allowed() so that future insertions are more predictable.
Signed-off-by: Anup Patel Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 0f75b6b3ced1..0dc2c2cecb45 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -24,6 +24,7 @@ /* Mapping between KVM ISA Extension ID & Host ISA extension ID */ static const unsigned long kvm_isa_ext_arr[] = { + /* Single letter extensions (alphabetically sorted) */ [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a, [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c, [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d, @@ -32,7 +33,7 @@ static const unsigned long kvm_isa_ext_arr[] = { [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i, [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v, - + /* Multi letter extensions (alphabetically sorted) */ KVM_ISA_EXT_ARR(SSAIA), KVM_ISA_EXT_ARR(SSTC), KVM_ISA_EXT_ARR(SVINVAL), @@ -41,13 +42,13 @@ static const unsigned long kvm_isa_ext_arr[] = { KVM_ISA_EXT_ARR(ZBA), KVM_ISA_EXT_ARR(ZBB), KVM_ISA_EXT_ARR(ZBS), + KVM_ISA_EXT_ARR(ZICBOM), + KVM_ISA_EXT_ARR(ZICBOZ), KVM_ISA_EXT_ARR(ZICNTR), KVM_ISA_EXT_ARR(ZICSR), KVM_ISA_EXT_ARR(ZIFENCEI), KVM_ISA_EXT_ARR(ZIHINTPAUSE), KVM_ISA_EXT_ARR(ZIHPM), - KVM_ISA_EXT_ARR(ZICBOM), - KVM_ISA_EXT_ARR(ZICBOZ), }; static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) @@ -87,14 +88,14 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_SSTC: case KVM_RISCV_ISA_EXT_SVINVAL: case KVM_RISCV_ISA_EXT_SVNAPOT: + case KVM_RISCV_ISA_EXT_ZBA: + case KVM_RISCV_ISA_EXT_ZBB: + case KVM_RISCV_ISA_EXT_ZBS: case KVM_RISCV_ISA_EXT_ZICNTR: case KVM_RISCV_ISA_EXT_ZICSR: case KVM_RISCV_ISA_EXT_ZIFENCEI: case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: case KVM_RISCV_ISA_EXT_ZIHPM: - case KVM_RISCV_ISA_EXT_ZBA: - case KVM_RISCV_ISA_EXT_ZBB: - case KVM_RISCV_ISA_EXT_ZBS: return false; default: break; -- cgit From 2776421e6839b67c34261861b169b7ee737b4c00 Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Fri, 28 Jul 2023 18:01:22 -0300 Subject: RISC-V: KVM: provide UAPI for host SATP mode KVM userspaces need to be aware of the host SATP to allow them to advertise it back to the guest OS. Since this information is used to build the guest FDT we can't wait for the SATP reg to be readable. We just need to read the SATP mode, thus we can use the existing 'satp_mode' global that represents the SATP reg with MODE set and both ASID and PPN cleared. E.g. for a 32 bit host running with sv32 satp_mode is 0x80000000, for a 64 bit host running sv57 satp_mode is 0xa000000000000000, and so on. Add a new userspace virtual config register 'satp_mode' to allow userspace to read the current SATP mode the host is using with GET_ONE_REG API before spinning the vcpu. 'satp_mode' can't be changed via KVM, so SET_ONE_REG is allowed as long as userspace writes the existing 'satp_mode'. 
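E.g. a VMM reading the register back gets the MODE field already shifted down; a rough sketch (illustrative only, vcpu_fd assumed):

        unsigned long mode;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                        KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
                .addr = (unsigned long)&mode,
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        /* mode == satp_mode >> SATP_MODE_SHIFT, e.g. 8 for sv39, 10 for sv57 */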
Signed-off-by: Daniel Henrique Barboza
Reviewed-by: Andrew Jones
Signed-off-by: Anup Patel
--- arch/riscv/include/asm/csr.h | 2 ++ arch/riscv/include/uapi/asm/kvm.h | 1 + arch/riscv/kvm/vcpu_onereg.c | 7 +++++++ 3 files changed, 10 insertions(+)
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 7bac43a3176e..777cb8299551 100644
--- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h
@@ -54,6 +54,7 @@ #ifndef CONFIG_64BIT #define SATP_PPN _AC(0x003FFFFF, UL) #define SATP_MODE_32 _AC(0x80000000, UL) +#define SATP_MODE_SHIFT 31 #define SATP_ASID_BITS 9 #define SATP_ASID_SHIFT 22 #define SATP_ASID_MASK _AC(0x1FF, UL)
@@ -62,6 +63,7 @@ #define SATP_MODE_39 _AC(0x8000000000000000, UL) #define SATP_MODE_48 _AC(0x9000000000000000, UL) #define SATP_MODE_57 _AC(0xa000000000000000, UL) +#define SATP_MODE_SHIFT 60 #define SATP_ASID_BITS 16 #define SATP_ASID_SHIFT 44 #define SATP_ASID_MASK _AC(0xFFFF, UL)
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 9c35e1427f73..992c5e407104 100644
--- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -55,6 +55,7 @@ struct kvm_riscv_config { unsigned long marchid; unsigned long mimpid; unsigned long zicboz_block_size; + unsigned long satp_mode; }; /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 0dc2c2cecb45..85773e858120 100644
--- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -152,6 +152,9 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_CONFIG_REG(mimpid): reg_val = vcpu->arch.mimpid; break; + case KVM_REG_RISCV_CONFIG_REG(satp_mode): + reg_val = satp_mode >> SATP_MODE_SHIFT; + break; default: return -EINVAL; }
@@ -234,6 +237,10 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, else return -EBUSY; break; + case KVM_REG_RISCV_CONFIG_REG(satp_mode): + if (reg_val != (satp_mode >> SATP_MODE_SHIFT)) + return -EINVAL; + break; default: return -EINVAL; }
-- cgit From 2a88f38cd58d026c0e13f4aaa495965d4e9626b9 Mon Sep 17 00:00:00 2001
From: Daniel Henrique Barboza
Date: Thu, 3 Aug 2023 13:32:53 -0300
Subject: RISC-V: KVM: return ENOENT in *_one_reg() when reg is unknown

get_one_reg() and set_one_reg() are returning EINVAL errors for almost everything: if a reg doesn't exist, if a reg ID is malformed, if the associated CPU extension that implements the reg isn't present in the host, and for set_one_reg() if the value being written is invalid. This isn't wrong according to the existing KVM API docs (EINVAL can be used when there's no such register) but adding more ENOENT instances will make it easier for userspace to understand what went wrong.

Existing userspaces can be affected by this error code change. We checked a few. As of current upstream code, crosvm doesn't check for any particular errno code when using kvm_(get|set)_one_reg(). Neither does QEMU. rust-vmm doesn't have kvm-riscv support yet. Thus we have a good chance of changing these error codes now while the KVM RISC-V ecosystem is still new, minimizing user impact.

Change all get_one_reg() and set_one_reg() implementations to return -ENOENT in all "no such register" cases.
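A sketch of what this buys a VMM probing an optional register (illustrative only; errno comes from <errno.h>, and reg/vcpu_fd are assumed to be set up as in the earlier sketches):

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
                if (errno == ENOENT)
                        ;       /* register doesn't exist on this host: skip it */
                else
                        ;       /* bad size, malformed ID, etc.: report an error */
        }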
Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/aia.c | 4 ++-- arch/riscv/kvm/vcpu_fp.c | 12 ++++++------ arch/riscv/kvm/vcpu_onereg.c | 36 ++++++++++++++++++------------------ arch/riscv/kvm/vcpu_sbi.c | 16 +++++++++------- arch/riscv/kvm/vcpu_timer.c | 8 ++++---- 5 files changed, 39 insertions(+), 37 deletions(-) diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c index 585a3b42c52c..74bb27440527 100644 --- a/arch/riscv/kvm/aia.c +++ b/arch/riscv/kvm/aia.c @@ -176,7 +176,7 @@ int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu, struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long)) - return -EINVAL; + return -ENOENT; *out_val = 0; if (kvm_riscv_aia_available()) @@ -192,7 +192,7 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu, struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long)) - return -EINVAL; + return -ENOENT; if (kvm_riscv_aia_available()) { ((unsigned long *)csr)[reg_num] = val; diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c index 9d8cbc42057a..08ba48a395aa 100644 --- a/arch/riscv/kvm/vcpu_fp.c +++ b/arch/riscv/kvm/vcpu_fp.c @@ -96,7 +96,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu, reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) reg_val = &cntx->fp.f.f[reg_num]; else - return -EINVAL; + return -ENOENT; } else if ((rtype == KVM_REG_RISCV_FP_D) && riscv_isa_extension_available(vcpu->arch.isa, d)) { if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) { @@ -109,9 +109,9 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu, return -EINVAL; reg_val = &cntx->fp.d.f[reg_num]; } else - return -EINVAL; + return -ENOENT; } else - return -EINVAL; + return -ENOENT; if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id))) return -EFAULT; @@ -141,7 +141,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu, reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) reg_val = &cntx->fp.f.f[reg_num]; else - return -EINVAL; + return -ENOENT; } else if ((rtype == KVM_REG_RISCV_FP_D) && riscv_isa_extension_available(vcpu->arch.isa, d)) { if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) { @@ -154,9 +154,9 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu, return -EINVAL; reg_val = &cntx->fp.d.f[reg_num]; } else - return -EINVAL; + return -ENOENT; } else - return -EINVAL; + return -ENOENT; if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT; diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 85773e858120..456e9f31441a 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -156,7 +156,7 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, reg_val = satp_mode >> SATP_MODE_SHIFT; break; default: - return -EINVAL; + return -ENOENT; } if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) @@ -242,7 +242,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, return -EINVAL; break; default: - return -EINVAL; + return -ENOENT; } return 0; @@ -262,7 +262,7 @@ static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu, if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) return -EINVAL; if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) - return -EINVAL; + return -ENOENT; if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) reg_val = cntx->sepc; @@ -273,7 +273,7 @@ static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu, reg_val = (cntx->sstatus 
& SR_SPP) ? KVM_RISCV_MODE_S : KVM_RISCV_MODE_U; else - return -EINVAL; + return -ENOENT; if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id))) return -EFAULT; @@ -295,7 +295,7 @@ static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu, if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) return -EINVAL; if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) - return -EINVAL; + return -ENOENT; if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT; @@ -311,7 +311,7 @@ static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu, else cntx->sstatus &= ~SR_SPP; } else - return -EINVAL; + return -ENOENT; return 0; } @@ -323,7 +323,7 @@ static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu, struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) - return -EINVAL; + return -ENOENT; if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { kvm_riscv_vcpu_flush_interrupts(vcpu); @@ -342,7 +342,7 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu, struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) - return -EINVAL; + return -ENOENT; if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { reg_val &= VSIP_VALID_MASK; @@ -381,7 +381,7 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu, rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val); break; default: - rc = -EINVAL; + rc = -ENOENT; break; } if (rc) @@ -420,7 +420,7 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val); break; default: - rc = -EINVAL; + rc = -ENOENT; break; } if (rc) @@ -437,7 +437,7 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu, if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) - return -EINVAL; + return -ENOENT; *reg_val = 0; host_isa_ext = kvm_isa_ext_arr[reg_num]; @@ -455,7 +455,7 @@ static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu, if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) - return -EINVAL; + return -ENOENT; host_isa_ext = kvm_isa_ext_arr[reg_num]; if (!__riscv_isa_extension_available(NULL, host_isa_ext)) @@ -489,7 +489,7 @@ static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu, unsigned long i, ext_id, ext_val; if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST) - return -EINVAL; + return -ENOENT; for (i = 0; i < BITS_PER_LONG; i++) { ext_id = i + reg_num * BITS_PER_LONG; @@ -512,7 +512,7 @@ static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu, unsigned long i, ext_id; if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST) - return -EINVAL; + return -ENOENT; for_each_set_bit(i, &reg_val, BITS_PER_LONG) { ext_id = i + reg_num * BITS_PER_LONG; @@ -554,7 +554,7 @@ static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu, reg_val = ~reg_val; break; default: - rc = -EINVAL; + rc = -ENOENT; } if (rc) return rc; @@ -592,7 +592,7 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_SBI_MULTI_DIS: return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false); default: - return -EINVAL; + return -ENOENT; } return 0; @@ -627,7 +627,7 @@ int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, break; } - return -EINVAL; + return -ENOENT; } int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, @@ -659,5 +659,5 @@ int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, break; } - return -EINVAL; + return -ENOENT; } diff --git a/arch/riscv/kvm/vcpu_sbi.c
b/arch/riscv/kvm/vcpu_sbi.c index 7b46e04fb667..9cd97091c723 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -140,8 +140,10 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu, const struct kvm_riscv_sbi_extension_entry *sext = NULL; struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; - if (reg_num >= KVM_RISCV_SBI_EXT_MAX || - (reg_val != 1 && reg_val != 0)) + if (reg_num >= KVM_RISCV_SBI_EXT_MAX) + return -ENOENT; + + if (reg_val != 1 && reg_val != 0) return -EINVAL; for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { @@ -175,7 +177,7 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; if (reg_num >= KVM_RISCV_SBI_EXT_MAX) - return -EINVAL; + return -ENOENT; for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { if (sbi_ext[i].ext_idx == reg_num) { @@ -206,7 +208,7 @@ static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu, unsigned long i, ext_id; if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST) - return -EINVAL; + return -ENOENT; for_each_set_bit(i, &reg_val, BITS_PER_LONG) { ext_id = i + reg_num * BITS_PER_LONG; @@ -226,7 +228,7 @@ static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu, unsigned long i, ext_id, ext_val; if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST) - return -EINVAL; + return -ENOENT; for (i = 0; i < BITS_PER_LONG; i++) { ext_id = i + reg_num * BITS_PER_LONG; @@ -272,7 +274,7 @@ int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_SBI_MULTI_DIS: return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false); default: - return -EINVAL; + return -ENOENT; } return 0; @@ -307,7 +309,7 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu, reg_val = ~reg_val; break; default: - rc = -EINVAL; + rc = -ENOENT; } if (rc) return rc; diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c index 3ac2ff6a65da..527d269cafff 100644 --- a/arch/riscv/kvm/vcpu_timer.c +++ b/arch/riscv/kvm/vcpu_timer.c @@ -170,7 +170,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu, if (KVM_REG_SIZE(reg->id) != sizeof(u64)) return -EINVAL; if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64)) - return -EINVAL; + return -ENOENT; switch (reg_num) { case KVM_REG_RISCV_TIMER_REG(frequency): @@ -187,7 +187,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu, KVM_RISCV_TIMER_STATE_OFF; break; default: - return -EINVAL; + return -ENOENT; } if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id))) @@ -211,7 +211,7 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu, if (KVM_REG_SIZE(reg->id) != sizeof(u64)) return -EINVAL; if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64)) - return -EINVAL; + return -ENOENT; if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT; @@ -233,7 +233,7 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu, ret = kvm_riscv_vcpu_timer_cancel(t); break; default: - ret = -EINVAL; + ret = -ENOENT; break; } -- cgit From a044ef71043ebc23903f657fd51b10783d34918b Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:32:54 -0300 Subject: RISC-V: KVM: use ENOENT in *_one_reg() when extension is unavailable Following a similar logic to the previous patch, let's minimize EINVAL usage in the *_one_reg() APIs by using ENOENT when an extension that implements the reg is not available. For consistency we're also replacing an EOPNOTSUPP instance that should be an ENOENT since it's an "extension is not available" error.
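To make the new convention concrete, a VMM can now use the errno to tell "this reg does not exist on this host" apart from "the caller passed a bad id or size". A minimal userspace sketch (not part of this series; only struct kvm_one_reg and the KVM_GET_ONE_REG ioctl come from the KVM API, the helper name is made up):

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Returns 1 if the reg exists, 0 if it is absent, -1 on a caller bug. */
    static int probe_reg(int vcpu_fd, __u64 id, __u64 *val)
    {
            struct kvm_one_reg reg = {
                    .id = id,
                    .addr = (__u64)(unsigned long)val,
            };

            if (!ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                    return 1;
            if (errno == ENOENT)    /* reg or its extension is unavailable */
                    return 0;
            return -1;              /* e.g. EINVAL: malformed id or size */
    }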
Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 456e9f31441a..1ffd8ac3800a 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -135,12 +135,12 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, break; case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) - return -EINVAL; + return -ENOENT; reg_val = riscv_cbom_block_size; break; case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) - return -EINVAL; + return -ENOENT; reg_val = riscv_cboz_block_size; break; case KVM_REG_RISCV_CONFIG_REG(mvendorid): @@ -459,7 +459,7 @@ static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu, host_isa_ext = kvm_isa_ext_arr[reg_num]; if (!__riscv_isa_extension_available(NULL, host_isa_ext)) - return -EOPNOTSUPP; + return -ENOENT; if (!vcpu->arch.ran_atleast_once) { /* -- cgit From e29f57911d61bcbbe442c72933c606225d3d4d68 Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:32:55 -0300 Subject: RISC-V: KVM: do not EOPNOTSUPP in set_one_reg() zicbo(m|z) zicbom_block_size and zicboz_block_size have a peculiar API: they can be read via get_one_reg() but any write will return an EOPNOTSUPP. It makes sense to return a 'not supported' error since neither value can be changed, but as far as userspace goes these regs throw the same EOPNOTSUPP error even if they were read beforehand via get_one_reg() and the same read value is being written back. EOPNOTSUPP is also returned even if ZICBOM/ZICBOZ aren't enabled in the host. Change both to work more like their counterparts in get_one_reg() and return -ENOENT if their respective extensions aren't available. After that, check whether userspace has written a valid value (i.e. the host value). Throw an -EINVAL if that's not the case; let it slide otherwise. This allows both regs to be read/written by userspace in a 'lazy' manner, as long as the userspace doesn't change the reg vals.
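The resulting 'lazy' flow - read a reg, then write the very same value back - can be sketched as follows (illustrative userspace code; vcpu_fd and id are assumed to exist, err() is from <err.h>):

    static void write_back_block_size(int vcpu_fd, __u64 id)
    {
            __u64 val = 0;
            struct kvm_one_reg reg = {
                    .id = id,
                    .addr = (__u64)(unsigned long)&val,
            };

            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                    err(1, "KVM_GET_ONE_REG");

            /*
             * Writing the same value back now succeeds; a different
             * value fails with EINVAL, a missing extension with ENOENT.
             */
            if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
                    err(1, "KVM_SET_ONE_REG");
    }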
Suggested-by: Andrew Jones Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 1ffd8ac3800a..e06256dd8d24 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -216,9 +216,17 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, } break; case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): - return -EOPNOTSUPP; + if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) + return -ENOENT; + if (reg_val != riscv_cbom_block_size) + return -EINVAL; + break; case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): - return -EOPNOTSUPP; + if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) + return -ENOENT; + if (reg_val != riscv_cboz_block_size) + return -EINVAL; + break; case KVM_REG_RISCV_CONFIG_REG(mvendorid): if (!vcpu->arch.ran_atleast_once) vcpu->arch.mvendorid = reg_val; -- cgit From 432a8b35cc23d3af9c60e9c0d191d7ff737afbdc Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:32:56 -0300 Subject: RISC-V: KVM: do not EOPNOTSUPP in set KVM_REG_RISCV_TIMER_REG The KVM_REG_RISCV_TIMER_REG can be read via get_one_reg(). But trying to write anything in this reg via set_one_reg() results in an EOPNOTSUPP. Change the API to behave like cbom_block_size: instead of always erroring out with EOPNOTSUPP, allow userspace to write the same value (riscv_timebase) back, throwing an EINVAL if a different value is attempted. Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_timer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c index 527d269cafff..75486b25ac45 100644 --- a/arch/riscv/kvm/vcpu_timer.c +++ b/arch/riscv/kvm/vcpu_timer.c @@ -218,7 +218,8 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu, switch (reg_num) { case KVM_REG_RISCV_TIMER_REG(frequency): - ret = -EOPNOTSUPP; + if (reg_val != riscv_timebase) + return -EINVAL; break; case KVM_REG_RISCV_TIMER_REG(time): gt->time_delta = reg_val - get_cycles64(); -- cgit From d57304bbfb742bf744d2cd4dc3a08ce3fbfba787 Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:32:57 -0300 Subject: RISC-V: KVM: use EBUSY when !vcpu->arch.ran_atleast_once vcpu_set_reg_config() and vcpu_set_reg_isa_ext() are throwing an EOPNOTSUPP error when !vcpu->arch.ran_atleast_once. In similar cases we're throwing an EBUSY error, like in mvendorid/marchid/mimpid set_reg(). EOPNOTSUPP has a connotation of finality. EBUSY is more adequate in this case since it's a condition/error related to the vcpu lifecycle. Change these EOPNOTSUPP instances to EBUSY.
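Taken together, the errno convention that the series converges on for set_one_reg() can be summarized in one sketch (pseudo-kernel code distilled from the patches above; reg_exists(), reg_val_valid() and reg_write() are invented helpers standing in for the per-reg checks):

    static int set_one_reg_sketch(struct kvm_vcpu *vcpu,
                                  unsigned long reg_num,
                                  unsigned long reg_val)
    {
            if (!reg_exists(vcpu, reg_num))
                    return -ENOENT; /* reg/extension absent on this host */

            if (!reg_val_valid(vcpu, reg_num, reg_val))
                    return -EINVAL; /* reg exists, value isn't acceptable */

            if (vcpu->arch.ran_atleast_once)
                    return -EBUSY;  /* reg is immutable once the vcpu ran */

            reg_write(vcpu, reg_num, reg_val);
            return 0;
    }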
Suggested-by: Andrew Jones Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index e06256dd8d24..971a2eb83180 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -212,7 +212,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, vcpu->arch.isa[0] = reg_val; kvm_riscv_vcpu_fp_reset(vcpu); } else { - return -EOPNOTSUPP; + return -EBUSY; } break; case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): @@ -484,7 +484,7 @@ static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu, return -EINVAL; kvm_riscv_vcpu_fp_reset(vcpu); } else { - return -EOPNOTSUPP; + return -EBUSY; } return 0; -- cgit From bea8d23713a2b2dc07ae9d7325b1269dff8c8a1f Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:32:58 -0300 Subject: RISC-V: KVM: avoid EBUSY when writing same ISA val kvm_riscv_vcpu_set_reg_config() will return -EBUSY if the ISA config reg is being written after the VCPU ran at least once. The same restriction isn't placed in kvm_riscv_vcpu_get_reg_config(), so there's a chance that we'll -EBUSY out on an ISA config reg write even if the userspace intended no changes to it. We'll allow the same form of 'lazy writing' that registers such as zicbom/zicboz_block_size support: avoid erroring out if userspace made no changes to the ISA config reg. Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 971a2eb83180..a0b0364b038f 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -190,6 +190,13 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, if (fls(reg_val) >= RISCV_ISA_EXT_BASE) return -EINVAL; + /* + * Return early (i.e. do nothing) if reg_val is the same + * value retrievable via kvm_riscv_vcpu_get_reg_config(). + */ + if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK)) + break; + if (!vcpu->arch.ran_atleast_once) { /* Ignore the enable/disable request for certain extensions */ for (i = 0; i < RISCV_ISA_EXT_BASE; i++) { -- cgit From 63bd660657efade03270c8cc7e3d9b6993350e56 Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:32:59 -0300 Subject: RISC-V: KVM: avoid EBUSY when writing the same machine ID val Right now we do not allow any write in mvendorid/marchid/mimpid if the vcpu already started, preventing these regs from being changed. However, if userspace doesn't change them, an alternative is to consider the reg write a no-op and avoid erroring out altogether. Userspace can then be oblivious to KVM internals if no changes were intended in the first place. Allow the same form of 'lazy writing' that registers such as zicbom/zicboz_block_size support: avoid erroring out if userspace makes no changes in mvendorid/marchid/mimpid during reg write.
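The practical payoff is on the migration path: a VMM restoring state on the destination can replay every reg obtained via KVM_GET_REG_LIST without special-casing the immutable ones. A sketch (userspace; list and vals are assumed to hold the source's reg ids and 64-bit values, warn() is from <err.h>):

    static void restore_regs(int vcpu_fd, struct kvm_reg_list *list, __u64 *vals)
    {
            __u64 i;

            for (i = 0; i < list->n; i++) {
                    struct kvm_one_reg reg = {
                            .id = list->reg[i],
                            .addr = (__u64)(unsigned long)&vals[i],
                    };

                    /*
                     * Unchanged machine IDs are now accepted even after
                     * the source vcpu has run; only real changes EBUSY.
                     */
                    if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
                            warn("reg 0x%llx", (unsigned long long)reg.id);
            }
    }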
Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index a0b0364b038f..81cb6b2784db 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -235,18 +235,24 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, return -EINVAL; break; case KVM_REG_RISCV_CONFIG_REG(mvendorid): + if (reg_val == vcpu->arch.mvendorid) + break; if (!vcpu->arch.ran_atleast_once) vcpu->arch.mvendorid = reg_val; else return -EBUSY; break; case KVM_REG_RISCV_CONFIG_REG(marchid): + if (reg_val == vcpu->arch.marchid) + break; if (!vcpu->arch.ran_atleast_once) vcpu->arch.marchid = reg_val; else return -EBUSY; break; case KVM_REG_RISCV_CONFIG_REG(mimpid): + if (reg_val == vcpu->arch.mimpid) + break; if (!vcpu->arch.ran_atleast_once) vcpu->arch.mimpid = reg_val; else -- cgit From 1099c80906d358db542c64aba370291ae4b932b7 Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:33:00 -0300 Subject: RISC-V: KVM: avoid EBUSY when writing the same isa_ext val riscv_vcpu_set_isa_ext_single() will prevent any write of isa_ext regs if the vcpu already started spinning. But if a userspace write doesn't actually change the extension state (enabled/disabled), there's no need to -EBUSY out - we can treat the operation as a no-op. zicbom/zicboz_block_size, the ISA config reg and mvendorid/march/mimpid already work in a more permissive manner w.r.t. userspace writes being a no-op, so let's do the same with isa_ext writes. Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 81cb6b2784db..989ea32dbcbe 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -482,6 +482,9 @@ static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu, if (!__riscv_isa_extension_available(NULL, host_isa_ext)) return -ENOENT; + if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa)) + return 0; + if (!vcpu->arch.ran_atleast_once) { /* * All multi-letter extension and a few single letter -- cgit From 1deaf754f5310e348e6386e75b7f16a0089fa3b7 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Thu, 3 Aug 2023 13:33:01 -0300 Subject: RISC-V: KVM: Improve vector save/restore errors kvm_riscv_vcpu_(get/set)_reg_vector() now return ENOENT if V is not available, EINVAL if the reg type is not VECTOR, and any error that might be thrown by kvm_riscv_vcpu_vreg_addr().
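The mechanical change enabling this is swapping the 'return a pointer or NULL' idiom for an errno return plus an out-parameter, so the two failure modes no longer collapse into one. The caller-side pattern looks roughly like this (names are placeholders, not the kernel's):

    void *reg_addr;
    int rc;

    rc = vreg_addr_lookup(vcpu, reg_num, reg_size, &reg_addr);
    if (rc)
            return rc;      /* -ENOENT vs. -EINVAL is preserved */

    if (copy_to_user(uaddr, reg_addr, reg_size))
            return -EFAULT;
    return 0;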
Signed-off-by: Andrew Jones Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_vector.c | 60 ++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c index edd2eecbddc2..39c5bceb4d1b 100644 --- a/arch/riscv/kvm/vcpu_vector.c +++ b/arch/riscv/kvm/vcpu_vector.c @@ -91,44 +91,44 @@ void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu) } #endif -static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, +static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, unsigned long reg_num, - size_t reg_size) + size_t reg_size, + void **reg_val) { struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; - void *reg_val; size_t vlenb = riscv_v_vsize / 32; if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) { if (reg_size != sizeof(unsigned long)) - return NULL; + return -EINVAL; switch (reg_num) { case KVM_REG_RISCV_VECTOR_CSR_REG(vstart): - reg_val = &cntx->vector.vstart; + *reg_val = &cntx->vector.vstart; break; case KVM_REG_RISCV_VECTOR_CSR_REG(vl): - reg_val = &cntx->vector.vl; + *reg_val = &cntx->vector.vl; break; case KVM_REG_RISCV_VECTOR_CSR_REG(vtype): - reg_val = &cntx->vector.vtype; + *reg_val = &cntx->vector.vtype; break; case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr): - reg_val = &cntx->vector.vcsr; + *reg_val = &cntx->vector.vcsr; break; case KVM_REG_RISCV_VECTOR_CSR_REG(datap): default: - return NULL; + return -ENOENT; } } else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) { if (reg_size != vlenb) - return NULL; - reg_val = cntx->vector.datap + return -EINVAL; + *reg_val = cntx->vector.datap + (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb; } else { - return NULL; + return -ENOENT; } - return reg_val; + return 0; } int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu, @@ -141,17 +141,20 @@ int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu, unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | rtype); - void *reg_val = NULL; size_t reg_size = KVM_REG_SIZE(reg->id); + void *reg_val; + int rc; - if (rtype == KVM_REG_RISCV_VECTOR && - riscv_isa_extension_available(isa, v)) { - reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size); - } - - if (!reg_val) + if (rtype != KVM_REG_RISCV_VECTOR) return -EINVAL; + if (!riscv_isa_extension_available(isa, v)) + return -ENOENT; + + rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_val); + if (rc) + return rc; + if (copy_to_user(uaddr, reg_val, reg_size)) return -EFAULT; @@ -168,17 +171,20 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu, unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | rtype); - void *reg_val = NULL; size_t reg_size = KVM_REG_SIZE(reg->id); + void *reg_val; + int rc; - if (rtype == KVM_REG_RISCV_VECTOR && - riscv_isa_extension_available(isa, v)) { - reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size); - } - - if (!reg_val) + if (rtype != KVM_REG_RISCV_VECTOR) return -EINVAL; + if (!riscv_isa_extension_available(isa, v)) + return -ENOENT; + + rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_val); + if (rc) + return rc; + if (copy_from_user(reg_val, uaddr, reg_size)) return -EFAULT; -- cgit From e47f3c2843c2c23467f9447ed977dfccbe13a6f7 Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Thu, 3 Aug 2023 13:33:02 -0300 Subject: docs: kvm: riscv: document EBUSY in KVM_SET_ONE_REG The EBUSY errno is being used for KVM_SET_ONE_REG as a way to tell userspace that a given reg can't be changed after the vcpu started.
Signed-off-by: Daniel Henrique Barboza Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- Documentation/virt/kvm/api.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index c0ddd3035462..3249fb56cc69 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -2259,6 +2259,8 @@ Errors: EINVAL invalid register ID, or no such register or used with VMs in protected virtualization mode on s390 EPERM (arm64) register access not allowed before vcpu finalization + EBUSY (riscv) changing register value not allowed after the vcpu + has run at least once ====== ============================================================ (These error codes are indicative only: do not rely on a specific error -- cgit From 630b4cee9c378e2dbb56ed84f71fc2fac50b716e Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Fri, 4 Aug 2023 16:56:18 +0300 Subject: RISC-V: KVM: Improve vector save/restore functions Make two nonfunctional changes to the vector get/set vector reg functions and their supporting function for simplification and readability. The first is to not pass KVM_REG_RISCV_VECTOR, but rather integrate it directly into the masking. The second is to rename reg_val to reg_addr where and address is used instead of a value. Also opportunistically touch up some of the code formatting for a third nonfunctional change. Signed-off-by: Andrew Jones Reviewed-by: Daniel Henrique Barboza Signed-off-by: Anup Patel --- arch/riscv/include/asm/kvm_vcpu_vector.h | 6 ++--- arch/riscv/kvm/vcpu_onereg.c | 6 ++--- arch/riscv/kvm/vcpu_vector.c | 46 +++++++++++++------------------- 3 files changed, 23 insertions(+), 35 deletions(-) diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h index ff994fdd6d0d..27f5bccdd8b0 100644 --- a/arch/riscv/include/asm/kvm_vcpu_vector.h +++ b/arch/riscv/include/asm/kvm_vcpu_vector.h @@ -74,9 +74,7 @@ static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu) #endif int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - unsigned long rtype); + const struct kvm_one_reg *reg); int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - unsigned long rtype); + const struct kvm_one_reg *reg); #endif diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 989ea32dbcbe..9fee1c176fbb 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -645,8 +645,7 @@ int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_SBI_EXT: return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg); case KVM_REG_RISCV_VECTOR: - return kvm_riscv_vcpu_set_reg_vector(vcpu, reg, - KVM_REG_RISCV_VECTOR); + return kvm_riscv_vcpu_set_reg_vector(vcpu, reg); default: break; } @@ -677,8 +676,7 @@ int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_SBI_EXT: return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg); case KVM_REG_RISCV_VECTOR: - return kvm_riscv_vcpu_get_reg_vector(vcpu, reg, - KVM_REG_RISCV_VECTOR); + return kvm_riscv_vcpu_get_reg_vector(vcpu, reg); default: break; } diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c index 39c5bceb4d1b..b430cbb69521 100644 --- a/arch/riscv/kvm/vcpu_vector.c +++ b/arch/riscv/kvm/vcpu_vector.c @@ -92,9 +92,9 @@ void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu) #endif static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, - unsigned long reg_num, - size_t reg_size, - void 
**reg_val) + unsigned long reg_num, + size_t reg_size, + void **reg_addr) { struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; size_t vlenb = riscv_v_vsize / 32; @@ -104,16 +104,16 @@ static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, return -EINVAL; switch (reg_num) { case KVM_REG_RISCV_VECTOR_CSR_REG(vstart): - *reg_val = &cntx->vector.vstart; + *reg_addr = &cntx->vector.vstart; break; case KVM_REG_RISCV_VECTOR_CSR_REG(vl): - *reg_val = &cntx->vector.vl; + *reg_addr = &cntx->vector.vl; break; case KVM_REG_RISCV_VECTOR_CSR_REG(vtype): - *reg_val = &cntx->vector.vtype; + *reg_addr = &cntx->vector.vtype; break; case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr): - *reg_val = &cntx->vector.vcsr; + *reg_addr = &cntx->vector.vcsr; break; case KVM_REG_RISCV_VECTOR_CSR_REG(datap): default: @@ -122,8 +122,8 @@ static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, } else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) { if (reg_size != vlenb) return -EINVAL; - *reg_val = cntx->vector.datap - + (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb; + *reg_addr = cntx->vector.datap + + (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb; } else { return -ENOENT; } @@ -132,60 +132,52 @@ static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, } int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - unsigned long rtype) + const struct kvm_one_reg *reg) { unsigned long *isa = vcpu->arch.isa; unsigned long __user *uaddr = (unsigned long __user *)(unsigned long)reg->addr; unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | - rtype); + KVM_REG_RISCV_VECTOR); size_t reg_size = KVM_REG_SIZE(reg->id); - void *reg_val; + void *reg_addr; int rc; - if (rtype != KVM_REG_RISCV_VECTOR) - return -EINVAL; - if (!riscv_isa_extension_available(isa, v)) return -ENOENT; - rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, ®_val); + rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, ®_addr); if (rc) return rc; - if (copy_to_user(uaddr, reg_val, reg_size)) + if (copy_to_user(uaddr, reg_addr, reg_size)) return -EFAULT; return 0; } int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - unsigned long rtype) + const struct kvm_one_reg *reg) { unsigned long *isa = vcpu->arch.isa; unsigned long __user *uaddr = (unsigned long __user *)(unsigned long)reg->addr; unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | - rtype); + KVM_REG_RISCV_VECTOR); size_t reg_size = KVM_REG_SIZE(reg->id); - void *reg_val; + void *reg_addr; int rc; - if (rtype != KVM_REG_RISCV_VECTOR) - return -EINVAL; - if (!riscv_isa_extension_available(isa, v)) return -ENOENT; - rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, ®_val); + rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, ®_addr); if (rc) return rc; - if (copy_from_user(reg_val, uaddr, reg_size)) + if (copy_from_user(reg_addr, uaddr, reg_size)) return -EFAULT; return 0; -- cgit From dfaf20af7649c82b0c53103e3bd9f3e6c3751c9d Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 25 Jul 2023 16:41:27 +0800 Subject: KVM: arm64: selftests: Replace str_with_index with strdup_printf The original author of aarch64/get-reg-list.c (me) was wearing tunnel vision goggles when implementing str_with_index(). There's no reason to have such a special case string function. Instead, take inspiration from glib and implement strdup_printf. The implementation builds on vasprintf() which requires _GNU_SOURCE, but we require _GNU_SOURCE in most files already. 
Signed-off-by: Andrew Jones Signed-off-by: Haibo Xu Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 23 +++++----------------- tools/testing/selftests/kvm/include/test_util.h | 2 ++ tools/testing/selftests/kvm/lib/test_util.c | 15 ++++++++++++++ 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index 4f10055af2aa..52244de20dce 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -180,19 +180,6 @@ static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg) return true; } -static const char *str_with_index(const char *template, __u64 index) -{ - char *str, *p; - int n; - - str = strdup(template); - p = strstr(str, "##"); - n = sprintf(p, "%lld", index); - strcat(p + n, strstr(template, "##") + 2); - - return (const char *)str; -} - #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK) #define CORE_REGS_XX_NR_WORDS 2 @@ -211,7 +198,7 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id) KVM_REG_ARM_CORE_REG(regs.regs[30]): idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS; TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx); - return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx); + return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx); case KVM_REG_ARM_CORE_REG(regs.sp): return "KVM_REG_ARM_CORE_REG(regs.sp)"; case KVM_REG_ARM_CORE_REG(regs.pc): @@ -226,12 +213,12 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id) KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]): idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS; TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx); - return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx); + return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx); case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ... KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]): idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS; TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx); - return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx); + return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx); case KVM_REG_ARM_CORE_REG(fp_regs.fpsr): return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)"; case KVM_REG_ARM_CORE_REG(fp_regs.fpcr): @@ -260,13 +247,13 @@ static const char *sve_id_to_str(struct vcpu_config *c, __u64 id) n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1); TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0), "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id); - return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n); + return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n); case KVM_REG_ARM64_SVE_PREG_BASE ... 
KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1: n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1); TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0), "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id); - return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n); + return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n); case KVM_REG_ARM64_SVE_FFR_BASE: TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0), "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id); diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index a6e9f215ce70..7e0182f837b5 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -186,4 +186,6 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str) return num; } +char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1))); + #endif /* SELFTEST_KVM_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index b772193f6c18..3e36019eeb4a 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -5,6 +5,9 @@ * Copyright (C) 2020, Google LLC. */ +#define _GNU_SOURCE +#include +#include #include #include #include @@ -377,3 +380,15 @@ int atoi_paranoid(const char *num_str) return num; } + +char *strdup_printf(const char *fmt, ...) +{ + va_list ap; + char *str; + + va_start(ap, fmt); + vasprintf(&str, fmt, ap); + va_end(ap); + + return str; +} -- cgit From 2653860812949d485e18414b72fa542c84763f71 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 25 Jul 2023 16:41:28 +0800 Subject: KVM: arm64: selftests: Drop SVE cap check in print_reg The check doesn't prove much anyway, as the reg lists could be messed up too. Just drop the check to simplify making print_reg more independent. Signed-off-by: Andrew Jones Signed-off-by: Haibo Xu Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index 52244de20dce..335bd5eb250a 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -129,16 +129,6 @@ static const char *config_name(struct vcpu_config *c) return c->name; } -static bool has_cap(struct vcpu_config *c, long capability) -{ - struct reg_sublist *s; - - for_each_sublist(c, s) - if (s->capability == capability) - return true; - return false; -} - static bool filter_reg(__u64 reg) { /* @@ -335,10 +325,7 @@ static void print_reg(struct vcpu_config *c, __u64 id) printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff); break; case KVM_REG_ARM64_SVE: - if (has_cap(c, KVM_CAP_ARM_SVE)) - printf("\t%s,\n", sve_id_to_str(c, id)); - else - TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id); + printf("\t%s,\n", sve_id_to_str(c, id)); break; default: TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx", -- cgit From c2b5aa7aebbae018edd988e9815087abf72e526b Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 25 Jul 2023 16:41:29 +0800 Subject: KVM: arm64: selftests: Remove print_reg's dependency on vcpu_config print_reg() and its helpers only use the vcpu_config pointer for config_name(). 
So just pass the config name in instead, which is used as a prefix in asserts. print_reg() can now be compiled independently of config_name(). Signed-off-by: Andrew Jones Signed-off-by: Haibo Xu Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 52 +++++++++++----------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index 335bd5eb250a..c9412be3a04c 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -176,7 +176,7 @@ static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg) #define CORE_SPSR_XX_NR_WORDS 2 #define CORE_FPREGS_XX_NR_WORDS 4 -static const char *core_id_to_str(struct vcpu_config *c, __u64 id) +static const char *core_id_to_str(const char *prefix, __u64 id) { __u64 core_off = id & ~REG_MASK, idx; @@ -187,7 +187,7 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id) case KVM_REG_ARM_CORE_REG(regs.regs[0]) ... KVM_REG_ARM_CORE_REG(regs.regs[30]): idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS; - TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx); + TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx); return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx); case KVM_REG_ARM_CORE_REG(regs.sp): return "KVM_REG_ARM_CORE_REG(regs.sp)"; @@ -202,12 +202,12 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id) case KVM_REG_ARM_CORE_REG(spsr[0]) ... KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]): idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS; - TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx); + TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx); return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx); case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ... KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]): idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS; - TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx); + TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx); return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx); case KVM_REG_ARM_CORE_REG(fp_regs.fpsr): return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)"; @@ -215,11 +215,11 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id) return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)"; } - TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id); + TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id); return NULL; } -static const char *sve_id_to_str(struct vcpu_config *c, __u64 id) +static const char *sve_id_to_str(const char *prefix, __u64 id) { __u64 sve_off, n, i; @@ -229,37 +229,37 @@ static const char *sve_id_to_str(struct vcpu_config *c, __u64 id) sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1)); i = id & (KVM_ARM64_SVE_MAX_SLICES - 1); - TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id); + TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id); switch (sve_off) { case KVM_REG_ARM64_SVE_ZREG_BASE ... 
KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1: n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1); TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0), - "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id); + "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id); return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n); case KVM_REG_ARM64_SVE_PREG_BASE ... KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1: n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1); TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0), - "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id); + "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id); return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n); case KVM_REG_ARM64_SVE_FFR_BASE: TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0), - "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id); + "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id); return "KVM_REG_ARM64_SVE_FFR(0)"; } return NULL; } -static void print_reg(struct vcpu_config *c, __u64 id) +static void print_reg(const char *prefix, __u64 id) { unsigned op0, op1, crn, crm, op2; const char *reg_size = NULL; TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64, - "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id); + "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id); switch (id & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U8: @@ -291,16 +291,16 @@ static void print_reg(struct vcpu_config *c, __u64 id) break; default: TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx", - config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id); + prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id); } switch (id & KVM_REG_ARM_COPROC_MASK) { case KVM_REG_ARM_CORE: - printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id)); + printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id)); break; case KVM_REG_ARM_DEMUX: TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)), - "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id); + "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id); printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n", reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK); break; @@ -311,25 +311,25 @@ static void print_reg(struct vcpu_config *c, __u64 id) crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT; op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT; TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2), - "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id); + "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id); printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2); break; case KVM_REG_ARM_FW: TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff), - "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id); + "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id); printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff); break; case KVM_REG_ARM_FW_FEAT_BMAP: TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff), - "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id); + "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id); printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff); break; case KVM_REG_ARM64_SVE: - 
printf("\t%s,\n", sve_id_to_str(c, id)); + printf("\t%s,\n", sve_id_to_str(prefix, id)); break; default: TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx", - config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id); + prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id); } } @@ -458,7 +458,7 @@ static void run_test(struct vcpu_config *c) __u64 id = reg_list->reg[i]; if ((print_list && !filter_reg(id)) || (print_filtered && filter_reg(id))) - print_reg(c, id); + print_reg(config_name(c), id); } putchar('\n'); return; @@ -486,7 +486,7 @@ static void run_test(struct vcpu_config *c) ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr); if (ret) { printf("%s: Failed to get ", config_name(c)); - print_reg(c, reg.id); + print_reg(config_name(c), reg.id); putchar('\n'); ++failed_get; } @@ -498,7 +498,7 @@ static void run_test(struct vcpu_config *c) ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); if (ret != -1 || errno != EPERM) { printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno); - print_reg(c, reg.id); + print_reg(config_name(c), reg.id); putchar('\n'); ++failed_reject; } @@ -510,7 +510,7 @@ static void run_test(struct vcpu_config *c) ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); if (ret) { printf("%s: Failed to set ", config_name(c)); - print_reg(c, reg.id); + print_reg(config_name(c), reg.id); putchar('\n'); ++failed_set; } @@ -548,7 +548,7 @@ static void run_test(struct vcpu_config *c) "Consider adding them to the blessed reg " "list with the following lines:\n\n", config_name(c), new_regs); for_each_new_reg(i) - print_reg(c, reg_list->reg[i]); + print_reg(config_name(c), reg_list->reg[i]); putchar('\n'); } @@ -556,7 +556,7 @@ static void run_test(struct vcpu_config *c) printf("\n%s: There are %d missing registers.\n" "The following lines are missing registers:\n\n", config_name(c), missing_regs); for_each_missing_reg(i) - print_reg(c, blessed_reg[i]); + print_reg(config_name(c), blessed_reg[i]); putchar('\n'); } -- cgit From 9177b715cdccdfd6711511440cf43b8be6e28c6c Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 25 Jul 2023 16:41:30 +0800 Subject: KVM: arm64: selftests: Rename vcpu_config and add to kvm_util.h Rename vcpu_config to vcpu_reg_list to be more specific and add it to kvm_util.h. While it may not get used outside get-reg-list tests, exporting it doesn't hurt, as long as it has a unique enough name. This is a step in the direction of sharing most of the get- reg-list test code between architectures. 
Signed-off-by: Andrew Jones Signed-off-by: Haibo Xu Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 60 ++++++++-------------- .../testing/selftests/kvm/include/kvm_util_base.h | 16 ++++++ 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index c9412be3a04c..cb6b40e167d0 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -37,17 +37,6 @@ static struct kvm_reg_list *reg_list; static __u64 *blessed_reg, blessed_n; -struct reg_sublist { - const char *name; - long capability; - int feature; - bool finalize; - __u64 *regs; - __u64 regs_n; - __u64 *rejects_set; - __u64 rejects_set_n; -}; - struct feature_id_reg { __u64 reg; __u64 id_reg; @@ -76,12 +65,7 @@ static struct feature_id_reg feat_id_regs[] = { } }; -struct vcpu_config { - char *name; - struct reg_sublist sublists[]; -}; - -static struct vcpu_config *vcpu_configs[]; +static struct vcpu_reg_list *vcpu_configs[]; static int vcpu_configs_n; #define for_each_sublist(c, s) \ @@ -103,9 +87,9 @@ static int vcpu_configs_n; for_each_reg_filtered(i) \ if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i])) -static const char *config_name(struct vcpu_config *c) +static const char *config_name(struct vcpu_reg_list *c) { - struct reg_sublist *s; + struct vcpu_reg_sublist *s; int len = 0; if (c->name) @@ -390,18 +374,18 @@ static void core_reg_fixup(void) reg_list = tmp; } -static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init) +static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init) { - struct reg_sublist *s; + struct vcpu_reg_sublist *s; for_each_sublist(c, s) if (s->capability) init->features[s->feature / 32] |= 1 << (s->feature % 32); } -static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c) +static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) { - struct reg_sublist *s; + struct vcpu_reg_sublist *s; int feature; for_each_sublist(c, s) { @@ -412,9 +396,9 @@ static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c) } } -static void check_supported(struct vcpu_config *c) +static void check_supported(struct vcpu_reg_list *c) { - struct reg_sublist *s; + struct vcpu_reg_sublist *s; for_each_sublist(c, s) { if (!s->capability) @@ -430,14 +414,14 @@ static bool print_list; static bool print_filtered; static bool fixup_core_regs; -static void run_test(struct vcpu_config *c) +static void run_test(struct vcpu_reg_list *c) { struct kvm_vcpu_init init = { .target = -1, }; int new_regs = 0, missing_regs = 0, i, n; int failed_get = 0, failed_set = 0, failed_reject = 0; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - struct reg_sublist *s; + struct vcpu_reg_sublist *s; check_supported(c); @@ -574,7 +558,7 @@ static void run_test(struct vcpu_config *c) static void help(void) { - struct vcpu_config *c; + struct vcpu_reg_list *c; int i; printf( @@ -598,9 +582,9 @@ static void help(void) ); } -static struct vcpu_config *parse_config(const char *config) +static struct vcpu_reg_list *parse_config(const char *config) { - struct vcpu_config *c; + struct vcpu_reg_list *c; int i; if (config[8] != '=') @@ -620,7 +604,7 @@ static struct vcpu_config *parse_config(const char *config) int main(int ac, char **av) { - struct vcpu_config *c, *sel = NULL; + struct vcpu_reg_list *c, *sel = NULL; int i, ret = 0; pid_t pid; @@ -1104,14 +1088,14 @@ static __u64 
pauth_generic_regs[] = { .regs_n = ARRAY_SIZE(pauth_generic_regs), \ } -static struct vcpu_config vregs_config = { +static struct vcpu_reg_list vregs_config = { .sublists = { BASE_SUBLIST, VREGS_SUBLIST, {0}, }, }; -static struct vcpu_config vregs_pmu_config = { +static struct vcpu_reg_list vregs_pmu_config = { .sublists = { BASE_SUBLIST, VREGS_SUBLIST, @@ -1119,14 +1103,14 @@ static struct vcpu_config vregs_pmu_config = { {0}, }, }; -static struct vcpu_config sve_config = { +static struct vcpu_reg_list sve_config = { .sublists = { BASE_SUBLIST, SVE_SUBLIST, {0}, }, }; -static struct vcpu_config sve_pmu_config = { +static struct vcpu_reg_list sve_pmu_config = { .sublists = { BASE_SUBLIST, SVE_SUBLIST, @@ -1134,7 +1118,7 @@ static struct vcpu_config sve_pmu_config = { {0}, }, }; -static struct vcpu_config pauth_config = { +static struct vcpu_reg_list pauth_config = { .sublists = { BASE_SUBLIST, VREGS_SUBLIST, @@ -1142,7 +1126,7 @@ static struct vcpu_config pauth_config = { {0}, }, }; -static struct vcpu_config pauth_pmu_config = { +static struct vcpu_reg_list pauth_pmu_config = { .sublists = { BASE_SUBLIST, VREGS_SUBLIST, @@ -1152,7 +1136,7 @@ static struct vcpu_config pauth_pmu_config = { }, }; -static struct vcpu_config *vcpu_configs[] = { +static struct vcpu_reg_list *vcpu_configs[] = { &vregs_config, &vregs_pmu_config, &sve_config, diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index eb1ff597bcca..b5189c7df482 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -15,6 +15,7 @@ #include #include #include "linux/rbtree.h" +#include #include @@ -124,6 +125,21 @@ struct kvm_vm { uint32_t memslots[NR_MEM_REGIONS]; }; +struct vcpu_reg_sublist { + const char *name; + long capability; + int feature; + bool finalize; + __u64 *regs; + __u64 regs_n; + __u64 *rejects_set; + __u64 rejects_set_n; +}; + +struct vcpu_reg_list { + char *name; + struct vcpu_reg_sublist sublists[]; +}; #define kvm_for_each_vcpu(vm, i, vcpu) \ for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \ -- cgit From 0ace6bda5701a38211ac21d45d4683721758e844 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 25 Jul 2023 16:41:31 +0800 Subject: KVM: arm64: selftests: Delete core_reg_fixup core_reg_fixup() complicates sharing the get-reg-list test with other architectures. Rather than work at keeping it, with plenty of #ifdeffery, just delete it, as it's unlikely anyone will test a kernel older than v5.2 with the get-reg-list test, which is a test meant to check for regressions in new kernels. (And, an older version of the test can still be used for older kernels if necessary.) Signed-off-by: Andrew Jones Signed-off-by: Haibo Xu Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 83 +++------------------- 1 file changed, 10 insertions(+), 73 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index cb6b40e167d0..11630bbfb86b 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -17,12 +17,10 @@ * by running the test with the --list command line argument. * * Note, the blessed list should be created from the oldest possible - * kernel.
We can't go older than v4.15, though, because that's the first - * release to expose the ID system registers in KVM_GET_REG_LIST, see - * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features - * from guests"). Also, one must use the --core-reg-fixup command line - * option when running on an older kernel that doesn't include df205b5c6328 - * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST") + * kernel. We can't go older than v5.2, though, because that's the first + * release which includes df205b5c6328 ("KVM: arm64: Filter out invalid + * core register IDs in KVM_GET_REG_LIST"). Without that commit the core + * registers won't match expectations. */ #include #include @@ -317,63 +315,6 @@ static void print_reg(const char *prefix, __u64 id) } } -/* - * Older kernels listed each 32-bit word of CORE registers separately. - * For 64 and 128-bit registers we need to ignore the extra words. We - * also need to fixup the sizes, because the older kernels stated all - * registers were 64-bit, even when they weren't. - */ -static void core_reg_fixup(void) -{ - struct kvm_reg_list *tmp; - __u64 id, core_off; - int i; - - tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64)); - - for (i = 0; i < reg_list->n; ++i) { - id = reg_list->reg[i]; - - if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) { - tmp->reg[tmp->n++] = id; - continue; - } - - core_off = id & ~REG_MASK; - - switch (core_off) { - case 0x52: case 0xd2: case 0xd6: - /* - * These offsets are pointing at padding. - * We need to ignore them too. - */ - continue; - case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ... - KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]): - if (core_off & 3) - continue; - id &= ~KVM_REG_SIZE_MASK; - id |= KVM_REG_SIZE_U128; - tmp->reg[tmp->n++] = id; - continue; - case KVM_REG_ARM_CORE_REG(fp_regs.fpsr): - case KVM_REG_ARM_CORE_REG(fp_regs.fpcr): - id &= ~KVM_REG_SIZE_MASK; - id |= KVM_REG_SIZE_U32; - tmp->reg[tmp->n++] = id; - continue; - default: - if (core_off & 1) - continue; - tmp->reg[tmp->n++] = id; - break; - } - } - - free(reg_list); - reg_list = tmp; -} - static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init) { struct vcpu_reg_sublist *s; @@ -412,7 +353,6 @@ static void check_supported(struct vcpu_reg_list *c) static bool print_list; static bool print_filtered; -static bool fixup_core_regs; static void run_test(struct vcpu_reg_list *c) { @@ -433,9 +373,6 @@ static void run_test(struct vcpu_reg_list *c) reg_list = vcpu_get_reg_list(vcpu); - if (fixup_core_regs) - core_reg_fixup(); - if (print_list || print_filtered) { putchar('\n'); for_each_reg(i) { @@ -563,7 +500,7 @@ static void help(void) printf( "\n" - "usage: get-reg-list [--config=] [--list] [--list-filtered] [--core-reg-fixup]\n\n" + "usage: get-reg-list [--config=] [--list] [--list-filtered]\n\n" " --config= Used to select a specific vcpu configuration for the test/listing\n" " '' may be\n"); @@ -577,7 +514,6 @@ static void help(void) "\n" " --list Print the register list rather than test it (requires --config)\n" " --list-filtered Print registers that would normally be filtered out (requires --config)\n" - " --core-reg-fixup Needed when running on old kernels with broken core reg listings\n" "\n" ); } @@ -609,9 +545,7 @@ int main(int ac, char **av) pid_t pid; for (i = 1; i < ac; ++i) { - if (strcmp(av[i], "--core-reg-fixup") == 0) - fixup_core_regs = true; - else if (strncmp(av[i], "--config", 8) == 0) + if (strncmp(av[i], "--config", 8) == 0) sel = parse_config(av[i]); else if 
(strcmp(av[i], "--list") == 0) print_list = true; @@ -654,8 +588,11 @@ int main(int ac, char **av) } /* - * The current blessed list was primed with the output of kernel version + * The original blessed list was primed with the output of kernel version * v4.15 with --core-reg-fixup and then later updated with new registers. + * (The --core-reg-fixup option and its fixup function have been removed + * from the test, as this type of test is unlikely to be used on a kernel + * older than v5.2.) * * The blessed list is up to date with kernel version v6.4 (or so we hope) */ -- cgit From 4460a7dc77d00340c59f7e4252c65efd5fbe877b Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 31 Jul 2023 12:40:31 +0100 Subject: KVM: arm64: Remove redundant kvm_set_pfn_accessed() from user_mem_abort() The function user_mem_abort() calls kvm_release_pfn_clean(), which eventually calls kvm_set_page_accessed(). Therefore, remove the redundant call to kvm_set_pfn_accessed(). Signed-off-by: Fuad Tabba Reviewed-by: Shaoqin Huang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230731114110.2673451-1-tabba@google.com --- arch/arm64/kvm/mmu.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index d3b4feed460c..137b775d238b 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1541,7 +1541,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, out_unlock: read_unlock(&kvm->mmu_lock); - kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); return ret != -EAGAIN ? ret : 0; } -- cgit From 17da79e009c376523ab977a351a2a69bad8e847b Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 25 Jul 2023 16:41:32 +0800 Subject: KVM: arm64: selftests: Split get-reg-list test code Split the arch-neutral test code out of aarch64/get-reg-list.c into get-reg-list.c. To do this we invent a new make variable $(SPLIT_TESTS) which expects common parts to be in the KVM selftests root and the counterparts to have the same name, but be in $(ARCH_DIR). There's still some work to be done to de-aarch64 the common get-reg-list.c, but we leave that to the next patch to avoid modifying too much code while moving it.
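The link-time contract this sets up is that the common object drives the test while each $(ARCH_DIR)/get-reg-list.o supplies the arch-specific pieces - e.g. filter_reg() loses its static in the hunks below. A rough sketch of the expected split (the exact declarations presumably live in the common file or a shared header):

    /* get-reg-list.c (common): consumes arch-provided symbols. */
    bool filter_reg(__u64 reg);                  /* defined per arch */
    extern struct vcpu_reg_list *vcpu_configs[]; /* assumed extern contract */
    extern int vcpu_configs_n;

The Makefile then links $(OUTPUT)/get-reg-list from both the common object and the arch object, which is what the new $(SPLIT_TESTS) machinery automates.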
Signed-off-by: Andrew Jones Signed-off-by: Haibo Xu Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/Makefile | 12 +- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 367 +------------------- tools/testing/selftests/kvm/get-reg-list.c | 377 +++++++++++++++++++++ 3 files changed, 398 insertions(+), 358 deletions(-) create mode 100644 tools/testing/selftests/kvm/get-reg-list.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c692cc86e7da..95f180e711d5 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -140,7 +140,6 @@ TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions -TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list TEST_GEN_PROGS_aarch64 += aarch64/hypercalls TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test TEST_GEN_PROGS_aarch64 += aarch64/psci_test @@ -152,6 +151,7 @@ TEST_GEN_PROGS_aarch64 += access_tracking_perf_test TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test +TEST_GEN_PROGS_aarch64 += get-reg-list TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus TEST_GEN_PROGS_aarch64 += kvm_page_table_test TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test @@ -181,6 +181,8 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test TEST_GEN_PROGS_riscv += set_memory_region_test TEST_GEN_PROGS_riscv += kvm_binary_stats_test +SPLIT_TESTS += get-reg-list + TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR)) TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR)) TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR)) @@ -228,11 +230,14 @@ LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C)) LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S)) LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING)) LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ) +SPLIT_TESTS_TARGETS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS)) +SPLIT_TESTS_OBJS := $(patsubst %, $(ARCH_DIR)/%.o, $(SPLIT_TESTS)) TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS)) TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED)) TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ)) TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS)) +TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TESTS_OBJS)) -include $(TEST_DEP_FILES) $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o @@ -240,7 +245,10 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o $(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ -EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) cscope.* +$(SPLIT_TESTS_TARGETS): %: %.o $(SPLIT_TESTS_OBJS) + $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@ + +EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) $(SPLIT_TESTS_OBJS) cscope.* x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index 11630bbfb86b..f72f1a522ff4 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -4,37 +4,17 @@ * * Copyright (C) 2020, Red Hat, Inc. 
* - * When attempting to migrate from a host with an older kernel to a host - * with a newer kernel we allow the newer kernel on the destination to - * list new registers with get-reg-list. We assume they'll be unused, at - * least until the guest reboots, and so they're relatively harmless. - * However, if the destination host with the newer kernel is missing - * registers which the source host with the older kernel has, then that's - * a regression in get-reg-list. This test checks for that regression by - * checking the current list against a blessed list. We should never have - * missing registers, but if new ones appear then they can probably be - * added to the blessed list. A completely new blessed list can be created - * by running the test with the --list command line argument. - * - * Note, the blessed list should be created from the oldest possible - * kernel. We can't go older than v5.2, though, because that's the first + * While the blessed list should be created from the oldest possible + * kernel, we can't go older than v5.2, though, because that's the first * release which includes df205b5c6328 ("KVM: arm64: Filter out invalid * core register IDs in KVM_GET_REG_LIST"). Without that commit the core * registers won't match expectations. */ #include -#include -#include -#include -#include -#include #include "kvm_util.h" #include "test_util.h" #include "processor.h" -static struct kvm_reg_list *reg_list; -static __u64 *blessed_reg, blessed_n; - struct feature_id_reg { __u64 reg; __u64 id_reg; @@ -63,55 +43,7 @@ static struct feature_id_reg feat_id_regs[] = { } }; -static struct vcpu_reg_list *vcpu_configs[]; -static int vcpu_configs_n; - -#define for_each_sublist(c, s) \ - for ((s) = &(c)->sublists[0]; (s)->regs; ++(s)) - -#define for_each_reg(i) \ - for ((i) = 0; (i) < reg_list->n; ++(i)) - -#define for_each_reg_filtered(i) \ - for_each_reg(i) \ - if (!filter_reg(reg_list->reg[i])) - -#define for_each_missing_reg(i) \ - for ((i) = 0; (i) < blessed_n; ++(i)) \ - if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i])) \ - if (check_supported_feat_reg(vcpu, blessed_reg[i])) - -#define for_each_new_reg(i) \ - for_each_reg_filtered(i) \ - if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i])) - -static const char *config_name(struct vcpu_reg_list *c) -{ - struct vcpu_reg_sublist *s; - int len = 0; - - if (c->name) - return c->name; - - for_each_sublist(c, s) - len += strlen(s->name) + 1; - - c->name = malloc(len); - - len = 0; - for_each_sublist(c, s) { - if (!strcmp(s->name, "base")) - continue; - strcat(c->name + len, s->name); - len += strlen(s->name) + 1; - c->name[len - 1] = '+'; - } - c->name[len - 1] = '\0'; - - return c->name; -} - -static bool filter_reg(__u64 reg) +bool filter_reg(__u64 reg) { /* * DEMUX register presence depends on the host's CLIDR_EL1. 
@@ -123,16 +55,6 @@ static bool filter_reg(__u64 reg) return false; } -static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg) -{ - int i; - - for (i = 0; i < nr_regs; ++i) - if (reg == regs[i]) - return true; - return false; -} - static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg) { int i, ret; @@ -152,6 +74,11 @@ static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg) return true; } +bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg) +{ + return check_supported_feat_reg(vcpu, reg); +} + #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK) #define CORE_REGS_XX_NR_WORDS 2 @@ -235,7 +162,7 @@ static const char *sve_id_to_str(const char *prefix, __u64 id) return NULL; } -static void print_reg(const char *prefix, __u64 id) +void print_reg(const char *prefix, __u64 id) { unsigned op0, op1, crn, crm, op2; const char *reg_size = NULL; @@ -315,278 +242,6 @@ static void print_reg(const char *prefix, __u64 id) } } -static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init) -{ - struct vcpu_reg_sublist *s; - - for_each_sublist(c, s) - if (s->capability) - init->features[s->feature / 32] |= 1 << (s->feature % 32); -} - -static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) -{ - struct vcpu_reg_sublist *s; - int feature; - - for_each_sublist(c, s) { - if (s->finalize) { - feature = s->feature; - vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature); - } - } -} - -static void check_supported(struct vcpu_reg_list *c) -{ - struct vcpu_reg_sublist *s; - - for_each_sublist(c, s) { - if (!s->capability) - continue; - - __TEST_REQUIRE(kvm_has_cap(s->capability), - "%s: %s not available, skipping tests\n", - config_name(c), s->name); - } -} - -static bool print_list; -static bool print_filtered; - -static void run_test(struct vcpu_reg_list *c) -{ - struct kvm_vcpu_init init = { .target = -1, }; - int new_regs = 0, missing_regs = 0, i, n; - int failed_get = 0, failed_set = 0, failed_reject = 0; - struct kvm_vcpu *vcpu; - struct kvm_vm *vm; - struct vcpu_reg_sublist *s; - - check_supported(c); - - vm = vm_create_barebones(); - prepare_vcpu_init(c, &init); - vcpu = __vm_vcpu_add(vm, 0); - aarch64_vcpu_setup(vcpu, &init); - finalize_vcpu(vcpu, c); - - reg_list = vcpu_get_reg_list(vcpu); - - if (print_list || print_filtered) { - putchar('\n'); - for_each_reg(i) { - __u64 id = reg_list->reg[i]; - if ((print_list && !filter_reg(id)) || - (print_filtered && filter_reg(id))) - print_reg(config_name(c), id); - } - putchar('\n'); - return; - } - - /* - * We only test that we can get the register and then write back the - * same value. Some registers may allow other values to be written - * back, but others only allow some bits to be changed, and at least - * for ID registers set will fail if the value does not exactly match - * what was returned by get. If registers that allow other values to - * be written need to have the other values tested, then we should - * create a new set of tests for those in a new independent test - * executable. 
- */ - for_each_reg(i) { - uint8_t addr[2048 / 8]; - struct kvm_one_reg reg = { - .id = reg_list->reg[i], - .addr = (__u64)&addr, - }; - bool reject_reg = false; - int ret; - - ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr); - if (ret) { - printf("%s: Failed to get ", config_name(c)); - print_reg(config_name(c), reg.id); - putchar('\n'); - ++failed_get; - } - - /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */ - for_each_sublist(c, s) { - if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) { - reject_reg = true; - ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); - if (ret != -1 || errno != EPERM) { - printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno); - print_reg(config_name(c), reg.id); - putchar('\n'); - ++failed_reject; - } - break; - } - } - - if (!reject_reg) { - ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); - if (ret) { - printf("%s: Failed to set ", config_name(c)); - print_reg(config_name(c), reg.id); - putchar('\n'); - ++failed_set; - } - } - } - - for_each_sublist(c, s) - blessed_n += s->regs_n; - blessed_reg = calloc(blessed_n, sizeof(__u64)); - - n = 0; - for_each_sublist(c, s) { - for (i = 0; i < s->regs_n; ++i) - blessed_reg[n++] = s->regs[i]; - } - - for_each_new_reg(i) - ++new_regs; - - for_each_missing_reg(i) - ++missing_regs; - - if (new_regs || missing_regs) { - n = 0; - for_each_reg_filtered(i) - ++n; - - printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n); - printf("%s: Number registers: %5lld (includes %lld filtered registers)\n", - config_name(c), reg_list->n, reg_list->n - n); - } - - if (new_regs) { - printf("\n%s: There are %d new registers.\n" - "Consider adding them to the blessed reg " - "list with the following lines:\n\n", config_name(c), new_regs); - for_each_new_reg(i) - print_reg(config_name(c), reg_list->reg[i]); - putchar('\n'); - } - - if (missing_regs) { - printf("\n%s: There are %d missing registers.\n" - "The following lines are missing registers:\n\n", config_name(c), missing_regs); - for_each_missing_reg(i) - print_reg(config_name(c), blessed_reg[i]); - putchar('\n'); - } - - TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject, - "%s: There are %d missing registers; " - "%d registers failed get; %d registers failed set; %d registers failed reject", - config_name(c), missing_regs, failed_get, failed_set, failed_reject); - - pr_info("%s: PASS\n", config_name(c)); - blessed_n = 0; - free(blessed_reg); - free(reg_list); - kvm_vm_free(vm); -} - -static void help(void) -{ - struct vcpu_reg_list *c; - int i; - - printf( - "\n" - "usage: get-reg-list [--config=] [--list] [--list-filtered]\n\n" - " --config= Used to select a specific vcpu configuration for the test/listing\n" - " '' may be\n"); - - for (i = 0; i < vcpu_configs_n; ++i) { - c = vcpu_configs[i]; - printf( - " '%s'\n", config_name(c)); - } - - printf( - "\n" - " --list Print the register list rather than test it (requires --config)\n" - " --list-filtered Print registers that would normally be filtered out (requires --config)\n" - "\n" - ); -} - -static struct vcpu_reg_list *parse_config(const char *config) -{ - struct vcpu_reg_list *c; - int i; - - if (config[8] != '=') - help(), exit(1); - - for (i = 0; i < vcpu_configs_n; ++i) { - c = vcpu_configs[i]; - if (strcmp(config_name(c), &config[9]) == 0) - break; - } - - if (i == vcpu_configs_n) - help(), exit(1); - - return c; -} - -int main(int ac, char **av) -{ - struct vcpu_reg_list *c, *sel = NULL; - int i, ret = 0; - pid_t pid; - 
- for (i = 1; i < ac; ++i) { - if (strncmp(av[i], "--config", 8) == 0) - sel = parse_config(av[i]); - else if (strcmp(av[i], "--list") == 0) - print_list = true; - else if (strcmp(av[i], "--list-filtered") == 0) - print_filtered = true; - else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0) - help(), exit(0); - else - help(), exit(1); - } - - if (print_list || print_filtered) { - /* - * We only want to print the register list of a single config. - */ - if (!sel) - help(), exit(1); - } - - for (i = 0; i < vcpu_configs_n; ++i) { - c = vcpu_configs[i]; - if (sel && c != sel) - continue; - - pid = fork(); - - if (!pid) { - run_test(c); - exit(0); - } else { - int wstatus; - pid_t wpid = wait(&wstatus); - TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return"); - if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP) - ret = KSFT_FAIL; - } - } - - return ret; -} - /* * The original blessed list was primed with the output of kernel version * v4.15 with --core-reg-fixup and then later updated with new registers. @@ -1073,7 +728,7 @@ static struct vcpu_reg_list pauth_pmu_config = { }, }; -static struct vcpu_reg_list *vcpu_configs[] = { +struct vcpu_reg_list *vcpu_configs[] = { &vregs_config, &vregs_pmu_config, &sve_config, @@ -1081,4 +736,4 @@ static struct vcpu_reg_list *vcpu_configs[] = { &pauth_config, &pauth_pmu_config, }; -static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c new file mode 100644 index 000000000000..a29e548643d1 --- /dev/null +++ b/tools/testing/selftests/kvm/get-reg-list.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Check for KVM_GET_REG_LIST regressions. + * + * Copyright (C) 2020, Red Hat, Inc. + * + * When attempting to migrate from a host with an older kernel to a host + * with a newer kernel we allow the newer kernel on the destination to + * list new registers with get-reg-list. We assume they'll be unused, at + * least until the guest reboots, and so they're relatively harmless. + * However, if the destination host with the newer kernel is missing + * registers which the source host with the older kernel has, then that's + * a regression in get-reg-list. This test checks for that regression by + * checking the current list against a blessed list. We should never have + * missing registers, but if new ones appear then they can probably be + * added to the blessed list. A completely new blessed list can be created + * by running the test with the --list command line argument. + * + * The blessed list should be created from the oldest possible kernel. 
+ */ +#include +#include +#include +#include +#include +#include +#include "kvm_util.h" +#include "test_util.h" +#include "processor.h" + +static struct kvm_reg_list *reg_list; +static __u64 *blessed_reg, blessed_n; + +extern struct vcpu_reg_list *vcpu_configs[]; +extern int vcpu_configs_n; + +#define for_each_sublist(c, s) \ + for ((s) = &(c)->sublists[0]; (s)->regs; ++(s)) + +#define for_each_reg(i) \ + for ((i) = 0; (i) < reg_list->n; ++(i)) + +#define for_each_reg_filtered(i) \ + for_each_reg(i) \ + if (!filter_reg(reg_list->reg[i])) + +#define for_each_missing_reg(i) \ + for ((i) = 0; (i) < blessed_n; ++(i)) \ + if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i])) \ + if (check_supported_reg(vcpu, blessed_reg[i])) + +#define for_each_new_reg(i) \ + for_each_reg_filtered(i) \ + if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i])) + +static const char *config_name(struct vcpu_reg_list *c) +{ + struct vcpu_reg_sublist *s; + int len = 0; + + if (c->name) + return c->name; + + for_each_sublist(c, s) + len += strlen(s->name) + 1; + + c->name = malloc(len); + + len = 0; + for_each_sublist(c, s) { + if (!strcmp(s->name, "base")) + continue; + strcat(c->name + len, s->name); + len += strlen(s->name) + 1; + c->name[len - 1] = '+'; + } + c->name[len - 1] = '\0'; + + return c->name; +} + +bool __weak check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg) +{ + return true; +} + +bool __weak filter_reg(__u64 reg) +{ + return false; +} + +static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg) +{ + int i; + + for (i = 0; i < nr_regs; ++i) + if (reg == regs[i]) + return true; + return false; +} + +void __weak print_reg(const char *prefix, __u64 id) +{ + printf("\t0x%llx,\n", id); +} + +static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init) +{ + struct vcpu_reg_sublist *s; + + for_each_sublist(c, s) + if (s->capability) + init->features[s->feature / 32] |= 1 << (s->feature % 32); +} + +static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) +{ + struct vcpu_reg_sublist *s; + int feature; + + for_each_sublist(c, s) { + if (s->finalize) { + feature = s->feature; + vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature); + } + } +} + +static void check_supported(struct vcpu_reg_list *c) +{ + struct vcpu_reg_sublist *s; + + for_each_sublist(c, s) { + if (!s->capability) + continue; + + __TEST_REQUIRE(kvm_has_cap(s->capability), + "%s: %s not available, skipping tests\n", + config_name(c), s->name); + } +} + +static bool print_list; +static bool print_filtered; + +static void run_test(struct vcpu_reg_list *c) +{ + struct kvm_vcpu_init init = { .target = -1, }; + int new_regs = 0, missing_regs = 0, i, n; + int failed_get = 0, failed_set = 0, failed_reject = 0; + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + struct vcpu_reg_sublist *s; + + check_supported(c); + + vm = vm_create_barebones(); + prepare_vcpu_init(c, &init); + vcpu = __vm_vcpu_add(vm, 0); + aarch64_vcpu_setup(vcpu, &init); + finalize_vcpu(vcpu, c); + + reg_list = vcpu_get_reg_list(vcpu); + + if (print_list || print_filtered) { + putchar('\n'); + for_each_reg(i) { + __u64 id = reg_list->reg[i]; + if ((print_list && !filter_reg(id)) || + (print_filtered && filter_reg(id))) + print_reg(config_name(c), id); + } + putchar('\n'); + return; + } + + /* + * We only test that we can get the register and then write back the + * same value. 
Some registers may allow other values to be written + * back, but others only allow some bits to be changed, and at least + * for ID registers set will fail if the value does not exactly match + * what was returned by get. If registers that allow other values to + * be written need to have the other values tested, then we should + * create a new set of tests for those in a new independent test + * executable. + */ + for_each_reg(i) { + uint8_t addr[2048 / 8]; + struct kvm_one_reg reg = { + .id = reg_list->reg[i], + .addr = (__u64)&addr, + }; + bool reject_reg = false; + int ret; + + ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr); + if (ret) { + printf("%s: Failed to get ", config_name(c)); + print_reg(config_name(c), reg.id); + putchar('\n'); + ++failed_get; + } + + /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */ + for_each_sublist(c, s) { + if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) { + reject_reg = true; + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); + if (ret != -1 || errno != EPERM) { + printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno); + print_reg(config_name(c), reg.id); + putchar('\n'); + ++failed_reject; + } + break; + } + } + + if (!reject_reg) { + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); + if (ret) { + printf("%s: Failed to set ", config_name(c)); + print_reg(config_name(c), reg.id); + putchar('\n'); + ++failed_set; + } + } + } + + for_each_sublist(c, s) + blessed_n += s->regs_n; + blessed_reg = calloc(blessed_n, sizeof(__u64)); + + n = 0; + for_each_sublist(c, s) { + for (i = 0; i < s->regs_n; ++i) + blessed_reg[n++] = s->regs[i]; + } + + for_each_new_reg(i) + ++new_regs; + + for_each_missing_reg(i) + ++missing_regs; + + if (new_regs || missing_regs) { + n = 0; + for_each_reg_filtered(i) + ++n; + + printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n); + printf("%s: Number registers: %5lld (includes %lld filtered registers)\n", + config_name(c), reg_list->n, reg_list->n - n); + } + + if (new_regs) { + printf("\n%s: There are %d new registers.\n" + "Consider adding them to the blessed reg " + "list with the following lines:\n\n", config_name(c), new_regs); + for_each_new_reg(i) + print_reg(config_name(c), reg_list->reg[i]); + putchar('\n'); + } + + if (missing_regs) { + printf("\n%s: There are %d missing registers.\n" + "The following lines are missing registers:\n\n", config_name(c), missing_regs); + for_each_missing_reg(i) + print_reg(config_name(c), blessed_reg[i]); + putchar('\n'); + } + + TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject, + "%s: There are %d missing registers; " + "%d registers failed get; %d registers failed set; %d registers failed reject", + config_name(c), missing_regs, failed_get, failed_set, failed_reject); + + pr_info("%s: PASS\n", config_name(c)); + blessed_n = 0; + free(blessed_reg); + free(reg_list); + kvm_vm_free(vm); +} + +static void help(void) +{ + struct vcpu_reg_list *c; + int i; + + printf( + "\n" + "usage: get-reg-list [--config=] [--list] [--list-filtered]\n\n" + " --config= Used to select a specific vcpu configuration for the test/listing\n" + " '' may be\n"); + + for (i = 0; i < vcpu_configs_n; ++i) { + c = vcpu_configs[i]; + printf( + " '%s'\n", config_name(c)); + } + + printf( + "\n" + " --list Print the register list rather than test it (requires --config)\n" + " --list-filtered Print registers that would normally be filtered out (requires --config)\n" + "\n" + ); +} + +static struct vcpu_reg_list 
*parse_config(const char *config) +{ + struct vcpu_reg_list *c = NULL; + int i; + + if (config[8] != '=') + help(), exit(1); + + for (i = 0; i < vcpu_configs_n; ++i) { + c = vcpu_configs[i]; + if (strcmp(config_name(c), &config[9]) == 0) + break; + } + + if (i == vcpu_configs_n) + help(), exit(1); + + return c; +} + +int main(int ac, char **av) +{ + struct vcpu_reg_list *c, *sel = NULL; + int i, ret = 0; + pid_t pid; + + for (i = 1; i < ac; ++i) { + if (strncmp(av[i], "--config", 8) == 0) + sel = parse_config(av[i]); + else if (strcmp(av[i], "--list") == 0) + print_list = true; + else if (strcmp(av[i], "--list-filtered") == 0) + print_filtered = true; + else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0) + help(), exit(0); + else + help(), exit(1); + } + + if (print_list || print_filtered) { + /* + * We only want to print the register list of a single config. + */ + if (!sel) + help(), exit(1); + } + + for (i = 0; i < vcpu_configs_n; ++i) { + c = vcpu_configs[i]; + if (sel && c != sel) + continue; + + pid = fork(); + + if (!pid) { + run_test(c); + exit(0); + } else { + int wstatus; + pid_t wpid = wait(&wstatus); + TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return"); + if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP) + ret = KSFT_FAIL; + } + } + + return ret; +} -- cgit From be4c58060c3efd1f8dff59e59ff8e69d08f00560 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 25 Jul 2023 16:41:33 +0800 Subject: KVM: arm64: selftests: Finish generalizing get-reg-list Add some unfortunate #ifdeffery to ensure the common get-reg-list.c can be compiled and run with other architectures. The next architecture to support get-reg-list should now only need to provide $(ARCH_DIR)/get-reg-list.c where arch-specific print_reg() and vcpu_configs[] get defined. 
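As a rough sketch, a new architecture's file could then be as small as
the following (hypothetical names and an empty register list for
illustration only; the weak fallbacks cover everything else):

/* hypothetical minimal $(ARCH_DIR)/get-reg-list.c for a new arch */
#include "kvm_util.h"

static __u64 base_regs[] = {
	/* blessed register ids for this architecture go here */
};

static struct vcpu_reg_list base_config = {
	.sublists = {
	{
		.name = "base",
		.regs = base_regs,
		.regs_n = ARRAY_SIZE(base_regs),
	},
	{0},
	},
};

struct vcpu_reg_list *vcpu_configs[] = {
	&base_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);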
Signed-off-by: Andrew Jones
Signed-off-by: Haibo Xu
Signed-off-by: Anup Patel
---
 tools/testing/selftests/kvm/get-reg-list.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index a29e548643d1..853cbf470da2 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -104,6 +104,7 @@ void __weak print_reg(const char *prefix, __u64 id)
 	printf("\t0x%llx,\n", id);
 }
 
+#ifdef __aarch64__
 static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
 {
 	struct vcpu_reg_sublist *s;
@@ -126,6 +127,25 @@ static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
 	}
 }
 
+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+	struct kvm_vcpu_init init = { .target = -1, };
+	struct kvm_vcpu *vcpu;
+
+	prepare_vcpu_init(c, &init);
+	vcpu = __vm_vcpu_add(vm, 0);
+	aarch64_vcpu_setup(vcpu, &init);
+	finalize_vcpu(vcpu, c);
+
+	return vcpu;
+}
+#else
+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+	return __vm_vcpu_add(vm, 0);
+}
+#endif
+
 static void check_supported(struct vcpu_reg_list *c)
 {
 	struct vcpu_reg_sublist *s;
@@ -145,7 +165,6 @@ static bool print_filtered;
 
 static void run_test(struct vcpu_reg_list *c)
 {
-	struct kvm_vcpu_init init = { .target = -1, };
 	int new_regs = 0, missing_regs = 0, i, n;
 	int failed_get = 0, failed_set = 0, failed_reject = 0;
 	struct kvm_vcpu *vcpu;
@@ -155,10 +174,7 @@ static void run_test(struct vcpu_reg_list *c)
 	check_supported(c);
 
 	vm = vm_create_barebones();
-	prepare_vcpu_init(c, &init);
-	vcpu = __vm_vcpu_add(vm, 0);
-	aarch64_vcpu_setup(vcpu, &init);
-	finalize_vcpu(vcpu, c);
+	vcpu = vcpu_config_get_vcpu(c, vm);
 
 	reg_list = vcpu_get_reg_list(vcpu);
-- cgit 

From 90a6bcbc542d0d61f50eeb85952e4fc49dbf3841 Mon Sep 17 00:00:00 2001
From: Haibo Xu
Date: Tue, 25 Jul 2023 16:41:34 +0800
Subject: KVM: arm64: selftests: Move reject_set check logic to a function

No functional changes. Just move the reject_set check logic to a
function so we can check for a specific errno. This is preparation for
supporting reject_set in riscv.
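The weak default deliberately accepts any errno; an architecture
overrides it with the exact error KVM returns there. Both overrides in
this series are one-liners (aarch64 in the hunk below, riscv in the
riscv test added at the end of the series); they live in different
per-arch files, so the two definitions never link together:

/* aarch64/get-reg-list.c: a rejected KVM_SET_ONE_REG fails with EPERM */
bool check_reject_set(int err)
{
	return err == EPERM;
}

/* riscv/get-reg-list.c: the same rejection surfaces as EINVAL */
bool check_reject_set(int err)
{
	return err == EINVAL;
}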
Suggested-by: Andrew Jones
Signed-off-by: Haibo Xu
Reviewed-by: Andrew Jones
Signed-off-by: Anup Patel
---
 tools/testing/selftests/kvm/aarch64/get-reg-list.c | 5 +++++
 tools/testing/selftests/kvm/get-reg-list.c | 7 ++++++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index f72f1a522ff4..f8ebc058b191 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -79,6 +79,11 @@ bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
 	return check_supported_feat_reg(vcpu, reg);
 }
 
+bool check_reject_set(int err)
+{
+	return err == EPERM;
+}
+
 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
 
 #define CORE_REGS_XX_NR_WORDS 2
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index 853cbf470da2..1df94e8e9ed5 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -104,6 +104,11 @@ void __weak print_reg(const char *prefix, __u64 id)
 	printf("\t0x%llx,\n", id);
 }
 
+bool __weak check_reject_set(int err)
+{
+	return true;
+}
+
 #ifdef __aarch64__
 static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
 {
@@ -222,7 +227,7 @@ static void run_test(struct vcpu_reg_list *c)
 			if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
 				reject_reg = true;
 				ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
-				if (ret != -1 || errno != EPERM) {
+				if (ret != -1 || !check_reject_set(errno)) {
 					printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
 					print_reg(config_name(c), reg.id);
 					putchar('\n');
-- cgit 

From e85660338f2b1f20981f2a343a33db75255942a7 Mon Sep 17 00:00:00 2001
From: Haibo Xu
Date: Tue, 25 Jul 2023 16:41:35 +0800
Subject: KVM: arm64: selftests: Move finalize_vcpu back to run_test

No functional changes. Just move the finalize_vcpu call back to
run_test and use a weak function trick to prepare for the corresponding
operation in riscv.
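For reference, the riscv override added later in this series boils down
to the following (condensed from the riscv get-reg-list.c at the end of
the series, with the availability double-check dropped):

/*
 * riscv finalize_vcpu(), condensed: disable every default-enabled ISA
 * extension, then enable only the ones the config's sublists name.
 */
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	struct vcpu_reg_sublist *s;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
		__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);

	for_each_sublist(c, s) {
		if (s->feature)
			__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);
	}
}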
Suggested-by: Andrew Jones Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 13 +++++++++++++ tools/testing/selftests/kvm/get-reg-list.c | 22 +++++----------------- .../testing/selftests/kvm/include/kvm_util_base.h | 3 +++ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index f8ebc058b191..709d7d721760 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -84,6 +84,19 @@ bool check_reject_set(int err) return err == EPERM; } +void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) +{ + struct vcpu_reg_sublist *s; + int feature; + + for_each_sublist(c, s) { + if (s->finalize) { + feature = s->feature; + vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature); + } + } +} + #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK) #define CORE_REGS_XX_NR_WORDS 2 diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c index 1df94e8e9ed5..43a919f2208f 100644 --- a/tools/testing/selftests/kvm/get-reg-list.c +++ b/tools/testing/selftests/kvm/get-reg-list.c @@ -34,9 +34,6 @@ static __u64 *blessed_reg, blessed_n; extern struct vcpu_reg_list *vcpu_configs[]; extern int vcpu_configs_n; -#define for_each_sublist(c, s) \ - for ((s) = &(c)->sublists[0]; (s)->regs; ++(s)) - #define for_each_reg(i) \ for ((i) = 0; (i) < reg_list->n; ++(i)) @@ -109,6 +106,10 @@ bool __weak check_reject_set(int err) return true; } +void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) +{ +} + #ifdef __aarch64__ static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init) { @@ -119,19 +120,6 @@ static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *ini init->features[s->feature / 32] |= 1 << (s->feature % 32); } -static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) -{ - struct vcpu_reg_sublist *s; - int feature; - - for_each_sublist(c, s) { - if (s->finalize) { - feature = s->feature; - vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature); - } - } -} - static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm) { struct kvm_vcpu_init init = { .target = -1, }; @@ -140,7 +128,6 @@ static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm prepare_vcpu_init(c, &init); vcpu = __vm_vcpu_add(vm, 0); aarch64_vcpu_setup(vcpu, &init); - finalize_vcpu(vcpu, c); return vcpu; } @@ -180,6 +167,7 @@ static void run_test(struct vcpu_reg_list *c) vm = vm_create_barebones(); vcpu = vcpu_config_get_vcpu(c, vm); + finalize_vcpu(vcpu, c); reg_list = vcpu_get_reg_list(vcpu); diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index b5189c7df482..bc7c08a09d30 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -141,6 +141,9 @@ struct vcpu_reg_list { struct vcpu_reg_sublist sublists[]; }; +#define for_each_sublist(c, s) \ + for ((s) = &(c)->sublists[0]; (s)->regs; ++(s)) + #define kvm_for_each_vcpu(vm, i, vcpu) \ for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \ if (!((vcpu) = vm->vcpus[i])) \ -- cgit From c47467712e8be10805aed4db5626a39aedc784bb Mon Sep 17 00:00:00 2001 From: Haibo Xu Date: Tue, 25 Jul 2023 16:41:36 +0800 Subject: KVM: 
selftests: Only do get/set tests on present blessed list

Only do the get/set tests on present and blessed registers since we
don't know the capabilities of any new ones.

Suggested-by: Andrew Jones
Signed-off-by: Haibo Xu
Reviewed-by: Andrew Jones
Signed-off-by: Anup Patel
---
 tools/testing/selftests/kvm/get-reg-list.c | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index 43a919f2208f..2232620fb797 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -50,6 +50,10 @@ extern int vcpu_configs_n;
 	for_each_reg_filtered(i)					\
 		if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
 
+#define for_each_present_blessed_reg(i)					\
+	for_each_reg(i)							\
+		if (find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+
 static const char *config_name(struct vcpu_reg_list *c)
 {
 	struct vcpu_reg_sublist *s;
@@ -183,6 +187,16 @@ static void run_test(struct vcpu_reg_list *c)
 		return;
 	}
 
+	for_each_sublist(c, s)
+		blessed_n += s->regs_n;
+	blessed_reg = calloc(blessed_n, sizeof(__u64));
+
+	n = 0;
+	for_each_sublist(c, s) {
+		for (i = 0; i < s->regs_n; ++i)
+			blessed_reg[n++] = s->regs[i];
+	}
+
 	/*
 	 * We only test that we can get the register and then write back the
 	 * same value. Some registers may allow other values to be written
@@ -192,8 +206,11 @@ static void run_test(struct vcpu_reg_list *c)
 	 * be written need to have the other values tested, then we should
 	 * create a new set of tests for those in a new independent test
 	 * executable.
+	 *
+	 * Only do the get/set tests on present, blessed list registers,
+	 * since we don't know the capabilities of any new registers.
 	 */
-	for_each_reg(i) {
+	for_each_present_blessed_reg(i) {
 		uint8_t addr[2048 / 8];
 		struct kvm_one_reg reg = {
 			.id = reg_list->reg[i],
@@ -236,16 +253,6 @@ static void run_test(struct vcpu_reg_list *c)
 		}
 	}
 
-	for_each_sublist(c, s)
-		blessed_n += s->regs_n;
-	blessed_reg = calloc(blessed_n, sizeof(__u64));
-
-	n = 0;
-	for_each_sublist(c, s) {
-		for (i = 0; i < s->regs_n; ++i)
-			blessed_reg[n++] = s->regs[i];
-	}
-
 	for_each_new_reg(i)
 		++new_regs;
 
-- cgit 

From cbc0daa67c62bcfeb45cccfc821216549566fb40 Mon Sep 17 00:00:00 2001
From: Haibo Xu
Date: Tue, 25 Jul 2023 16:41:37 +0800
Subject: KVM: selftests: Add skip_set facility to get_reg_list test

Add new skips_set members to vcpu_reg_sublist so that the set operation
can be skipped on some registers.
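Declaring a skipped register is then just two more sublist fields. A
hypothetical sketch (the register chosen here, a free-running timer
value whose set test would be unstable, is illustrative only):

/* hypothetical sublist: skip the set test on a moving-target register */
static __u64 base_skips_set[] = {
	RISCV_TIMER_REG(time),
};

static struct vcpu_reg_sublist base = {
	.name = "base",
	.regs = base_regs,		/* blessed ids, as before */
	.regs_n = ARRAY_SIZE(base_regs),
	.skips_set = base_skips_set,
	.skips_set_n = ARRAY_SIZE(base_skips_set),
};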
Suggested-by: Andrew Jones
Signed-off-by: Haibo Xu
Reviewed-by: Andrew Jones
Signed-off-by: Anup Patel
---
 tools/testing/selftests/kvm/get-reg-list.c | 20 ++++++++++++++------
 tools/testing/selftests/kvm/include/kvm_util_base.h | 2 ++
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index 2232620fb797..be7bf5224434 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -163,6 +163,7 @@ static void run_test(struct vcpu_reg_list *c)
 {
 	int new_regs = 0, missing_regs = 0, i, n;
 	int failed_get = 0, failed_set = 0, failed_reject = 0;
+	int skipped_set = 0;
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	struct vcpu_reg_sublist *s;
@@ -216,7 +217,7 @@ static void run_test(struct vcpu_reg_list *c)
 			.id = reg_list->reg[i],
 			.addr = (__u64)&addr,
 		};
-		bool reject_reg = false;
+		bool reject_reg = false, skip_reg = false;
 		int ret;
 
 		ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
@@ -227,8 +228,8 @@ static void run_test(struct vcpu_reg_list *c)
 			++failed_get;
 		}
 
-		/* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
 		for_each_sublist(c, s) {
+			/* rejects_set registers are rejected for set operation */
 			if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
 				reject_reg = true;
 				ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
@@ -240,9 +241,16 @@ static void run_test(struct vcpu_reg_list *c)
 				}
 				break;
 			}
+
+			/* skips_set registers are skipped for set operation */
+			if (s->skips_set && find_reg(s->skips_set, s->skips_set_n, reg.id)) {
+				skip_reg = true;
+				++skipped_set;
+				break;
+			}
 		}
 
-		if (!reject_reg) {
+		if (!reject_reg && !skip_reg) {
 			ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
 			if (ret) {
 				printf("%s: Failed to set ", config_name(c));
@@ -287,9 +295,9 @@ static void run_test(struct vcpu_reg_list *c)
 	}
 
 	TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
-		    "%s: There are %d missing registers; "
-		    "%d registers failed get; %d registers failed set; %d registers failed reject",
-		    config_name(c), missing_regs, failed_get, failed_set, failed_reject);
+		    "%s: There are %d missing registers; %d registers failed get; "
+		    "%d registers failed set; %d registers failed reject; %d registers skipped set",
+		    config_name(c), missing_regs, failed_get, failed_set, failed_reject, skipped_set);
 
 	pr_info("%s: PASS\n", config_name(c));
 	blessed_n = 0;
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index bc7c08a09d30..a18db6a7b3cf 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -134,6 +134,8 @@ struct vcpu_reg_sublist {
 	__u64 regs_n;
 	__u64 *rejects_set;
 	__u64 rejects_set_n;
+	__u64 *skips_set;
+	__u64 skips_set_n;
 };
 
 struct vcpu_reg_list {
-- cgit 

From 031f9efafc08d68b1b672e83ee73f6ea5c69c2ef Mon Sep 17 00:00:00 2001
From: Haibo Xu
Date: Tue, 25 Jul 2023 16:41:38 +0800
Subject: KVM: riscv: Add KVM_GET_REG_LIST API support

The KVM_GET_REG_LIST API returns all registers that are available to
the KVM_GET/SET_ONE_REG APIs. It's very useful for identifying platform
regression issues during VM migration.

Since this API was already supported on arm64, it is straightforward
to enable it on riscv with similar code structure.
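The handler below implements the same two-call protocol arm64 uses: a
caller first probes with n = 0 (the ioctl fails with E2BIG but writes
the real count back), then retries with a large-enough buffer. A
userspace sketch, assuming vcpu_fd is an open vCPU file descriptor:

/* sketch: standard userspace pattern for KVM_GET_REG_LIST */
struct kvm_reg_list probe = { .n = 0 }, *list;

ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	/* fails with E2BIG, sets probe.n */
list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
list->n = probe.n;
if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list))
	err(1, "KVM_GET_REG_LIST");

This is how the selftest's vcpu_get_reg_list() obtains reg_list.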
Signed-off-by: Haibo Xu Reviewed-by: Andrew Jones Signed-off-by: Anup Patel --- Documentation/virt/kvm/api.rst | 2 +- arch/riscv/include/asm/kvm_host.h | 3 + arch/riscv/kvm/vcpu.c | 18 ++ arch/riscv/kvm/vcpu_onereg.c | 366 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 388 insertions(+), 1 deletion(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 3249fb56cc69..660d9ca7a251 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -3501,7 +3501,7 @@ VCPU matching underlying host. --------------------- :Capability: basic -:Architectures: arm64, mips +:Architectures: arm64, mips, riscv :Type: vcpu ioctl :Parameters: struct kvm_reg_list (in/out) :Returns: 0 on success; -1 on error diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 55bc7bdbff48..1ebf20dfbaa6 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -338,6 +338,9 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch); void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu); +unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu, + u64 __user *uindices); int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 452d6548e951..82229db1ce73 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -254,6 +254,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_riscv_vcpu_get_reg(vcpu, ®); break; } + case KVM_GET_REG_LIST: { + struct kvm_reg_list __user *user_list = argp; + struct kvm_reg_list reg_list; + unsigned int n; + + r = -EFAULT; + if (copy_from_user(®_list, user_list, sizeof(reg_list))) + break; + n = reg_list.n; + reg_list.n = kvm_riscv_vcpu_num_regs(vcpu); + if (copy_to_user(user_list, ®_list, sizeof(reg_list))) + break; + r = -E2BIG; + if (n < reg_list.n) + break; + r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg); + break; + } default: break; } diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 9fee1c176fbb..1b7e9fa265cb 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -622,6 +622,372 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, return 0; } +static int copy_config_reg_indices(const struct kvm_vcpu *vcpu, + u64 __user *uindices) +{ + int n = 0; + + for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long); + i++) { + u64 size; + u64 reg; + + /* + * Avoid reporting config reg if the corresponding extension + * was not available. + */ + if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) && + !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) + continue; + else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) && + !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) + continue; + + size = IS_ENABLED(CONFIG_32BIT) ? 
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; + reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i; + + if (uindices) { + if (put_user(reg, uindices)) + return -EFAULT; + uindices++; + } + + n++; + } + + return n; +} + +static unsigned long num_config_regs(const struct kvm_vcpu *vcpu) +{ + return copy_config_reg_indices(vcpu, NULL); +} + +static inline unsigned long num_core_regs(void) +{ + return sizeof(struct kvm_riscv_core) / sizeof(unsigned long); +} + +static int copy_core_reg_indices(u64 __user *uindices) +{ + int n = num_core_regs(); + + for (int i = 0; i < n; i++) { + u64 size = IS_ENABLED(CONFIG_32BIT) ? + KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; + u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i; + + if (uindices) { + if (put_user(reg, uindices)) + return -EFAULT; + uindices++; + } + } + + return n; +} + +static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu) +{ + unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long); + + if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) + n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long); + + return n; +} + +static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu, + u64 __user *uindices) +{ + int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long); + int n2 = 0; + + /* copy general csr regs */ + for (int i = 0; i < n1; i++) { + u64 size = IS_ENABLED(CONFIG_32BIT) ? + KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; + u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR | + KVM_REG_RISCV_CSR_GENERAL | i; + + if (uindices) { + if (put_user(reg, uindices)) + return -EFAULT; + uindices++; + } + } + + /* copy AIA csr regs */ + if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) { + n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long); + + for (int i = 0; i < n2; i++) { + u64 size = IS_ENABLED(CONFIG_32BIT) ? 
+				KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+				  KVM_REG_RISCV_CSR_AIA | i;
+
+			if (uindices) {
+				if (put_user(reg, uindices))
+					return -EFAULT;
+				uindices++;
+			}
+		}
+	}
+
+	return n1 + n2;
+}
+
+static inline unsigned long num_timer_regs(void)
+{
+	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
+}
+
+static int copy_timer_reg_indices(u64 __user *uindices)
+{
+	int n = num_timer_regs();
+
+	for (int i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+			  KVM_REG_RISCV_TIMER | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return n;
+}
+
+static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
+{
+	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+	if (riscv_isa_extension_available(vcpu->arch.isa, f))
+		return sizeof(cntx->fp.f) / sizeof(u32);
+	else
+		return 0;
+}
+
+static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
+				 u64 __user *uindices)
+{
+	int n = num_fp_f_regs(vcpu);
+
+	for (int i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
+			  KVM_REG_RISCV_FP_F | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return n;
+}
+
+static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
+{
+	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+	if (riscv_isa_extension_available(vcpu->arch.isa, d))
+		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
+	else
+		return 0;
+}
+
+static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
+				 u64 __user *uindices)
+{
+	int i;
+	int n = num_fp_d_regs(vcpu);
+	u64 reg;
+
+	/* copy fp.d.f indices */
+	for (i = 0; i < n-1; i++) {
+		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+		      KVM_REG_RISCV_FP_D | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	/* copy fp.d.fcsr indices */
+	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
+	if (uindices) {
+		if (put_user(reg, uindices))
+			return -EFAULT;
+		uindices++;
+	}
+
+	return n;
+}
+
+static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
+				    u64 __user *uindices)
+{
+	unsigned int n = 0;
+	unsigned long isa_ext;
+
+	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+		u64 size = IS_ENABLED(CONFIG_32BIT) ?
+			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
+
+		isa_ext = kvm_isa_ext_arr[i];
+		if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+			continue;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+
+		n++;
+	}
+
+	return n;
+}
+
+static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
+{
+	return copy_isa_ext_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_sbi_ext_regs(void)
+{
+	/*
+	 * number of KVM_REG_RISCV_SBI_SINGLE +
+	 * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
+	 */
+	return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
+}
+
+static int copy_sbi_ext_reg_indices(u64 __user *uindices)
+{
+	int n;
+
+	/* copy KVM_REG_RISCV_SBI_SINGLE */
+	n = KVM_RISCV_SBI_EXT_MAX;
+	for (int i = 0; i < n; i++) {
+		u64 size = IS_ENABLED(CONFIG_32BIT) ?
+			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+			  KVM_REG_RISCV_SBI_SINGLE | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	/* copy KVM_REG_RISCV_SBI_MULTI */
+	n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
+	for (int i = 0; i < n; i++) {
+		u64 size = IS_ENABLED(CONFIG_32BIT) ?
+			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+			  KVM_REG_RISCV_SBI_MULTI_EN | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+
+		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+		      KVM_REG_RISCV_SBI_MULTI_DIS | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return num_sbi_ext_regs();
+}
+
+/*
+ * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
+{
+	unsigned long res = 0;
+
+	res += num_config_regs(vcpu);
+	res += num_core_regs();
+	res += num_csr_regs(vcpu);
+	res += num_timer_regs();
+	res += num_fp_f_regs(vcpu);
+	res += num_fp_d_regs(vcpu);
+	res += num_isa_ext_regs(vcpu);
+	res += num_sbi_ext_regs();
+
+	return res;
+}
+
+/*
+ * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
+ */
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+				    u64 __user *uindices)
+{
+	int ret;
+
+	ret = copy_config_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_core_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_csr_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_timer_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_fp_f_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_fp_d_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_isa_ext_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_sbi_ext_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 			   const struct kvm_one_reg *reg)
 {
-- cgit 

From 477069398ed6e0498ee243e799cb6c68baf6ccb8 Mon Sep 17 00:00:00 2001
From: Haibo Xu
Date: Tue, 25 Jul 2023 16:41:39 +0800
Subject: KVM: riscv: selftests: Add get-reg-list test

The get-reg-list test is used to check for KVM register regressions
during VM migration, which happen when the destination host kernel is
missing registers that the source host kernel has.
The blessed list of registers was created by running the test on
v6.5-rc3.

Signed-off-by: Haibo Xu
Reviewed-by: Andrew Jones
Signed-off-by: Anup Patel
---
 tools/testing/selftests/kvm/Makefile | 1 +
 .../selftests/kvm/include/riscv/processor.h | 3 +
 tools/testing/selftests/kvm/riscv/get-reg-list.c | 872 +++++++++++++++++++++
 3 files changed, 876 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/riscv/get-reg-list.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 95f180e711d5..72ba48fcdc66 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -176,6 +176,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
 
 TEST_GEN_PROGS_riscv += demand_paging_test
 TEST_GEN_PROGS_riscv += dirty_log_test
+TEST_GEN_PROGS_riscv += get-reg-list
 TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
 TEST_GEN_PROGS_riscv += kvm_page_table_test
 TEST_GEN_PROGS_riscv += set_memory_region_test
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index d00d213c3805..5b62a3d2aa9b 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
 					     KVM_REG_RISCV_TIMER_REG(name), \
 					     KVM_REG_SIZE_U64)
 
+#define RISCV_ISA_EXT_REG(idx)	__kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
+					     idx, KVM_REG_SIZE_ULONG)
+
 /* L3 index Bit[47:39] */
 #define PGTBL_L3_INDEX_MASK			0x0000FF8000000000ULL
 #define PGTBL_L3_INDEX_SHIFT			39
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
new file mode 100644
index 000000000000..d8ecacd03ecf
--- /dev/null
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (c) 2023 Intel Corporation
+ *
+ */
+#include
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
+
+bool filter_reg(__u64 reg)
+{
+	/*
+	 * Some ISA extensions are optional and not present on all hosts,
+	 * but they can't be disabled through ISA_EXT registers when present.
+	 * So, to make life easy, just filter out these kinds of registers.
+ */ + switch (reg & ~REG_MASK) { + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM: + return true; + default: + break; + } + + return false; +} + +bool check_reject_set(int err) +{ + return err == EINVAL; +} + +static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext) +{ + int ret; + unsigned long value; + + ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value); + if (ret) { + printf("Failed to get ext %d", ext); + return false; + } + + return !!value; +} + +void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) +{ + struct vcpu_reg_sublist *s; + + /* + * Disable all extensions which were enabled by default + * if they were available in the risc-v host. + */ + for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) + __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0); + + for_each_sublist(c, s) { + if (!s->feature) + continue; + + /* Try to enable the desired extension */ + __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1); + + /* Double check whether the desired extension was enabled */ + __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature), + "%s not available, skipping tests\n", s->name); + } +} + +static const char *config_id_to_str(__u64 id) +{ + /* reg_off is the offset into struct kvm_riscv_config */ + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG); + + switch (reg_off) { + case KVM_REG_RISCV_CONFIG_REG(isa): + return "KVM_REG_RISCV_CONFIG_REG(isa)"; + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): + return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)"; + case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): + return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)"; + case KVM_REG_RISCV_CONFIG_REG(mvendorid): + return "KVM_REG_RISCV_CONFIG_REG(mvendorid)"; + case KVM_REG_RISCV_CONFIG_REG(marchid): + return "KVM_REG_RISCV_CONFIG_REG(marchid)"; + case KVM_REG_RISCV_CONFIG_REG(mimpid): + return "KVM_REG_RISCV_CONFIG_REG(mimpid)"; + case KVM_REG_RISCV_CONFIG_REG(satp_mode): + return "KVM_REG_RISCV_CONFIG_REG(satp_mode)"; + } + + /* + * Config regs would grow regularly with new pseudo reg added, so + * just show raw id to indicate a new pseudo config reg. + */ + return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off); +} + +static const char *core_id_to_str(const char *prefix, __u64 id) +{ + /* reg_off is the offset into struct kvm_riscv_core */ + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE); + + switch (reg_off) { + case KVM_REG_RISCV_CORE_REG(regs.pc): + return "KVM_REG_RISCV_CORE_REG(regs.pc)"; + case KVM_REG_RISCV_CORE_REG(regs.ra): + return "KVM_REG_RISCV_CORE_REG(regs.ra)"; + case KVM_REG_RISCV_CORE_REG(regs.sp): + return "KVM_REG_RISCV_CORE_REG(regs.sp)"; + case KVM_REG_RISCV_CORE_REG(regs.gp): + return "KVM_REG_RISCV_CORE_REG(regs.gp)"; + case KVM_REG_RISCV_CORE_REG(regs.tp): + return "KVM_REG_RISCV_CORE_REG(regs.tp)"; + case KVM_REG_RISCV_CORE_REG(regs.t0) ... 
KVM_REG_RISCV_CORE_REG(regs.t2): + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)", + reg_off - KVM_REG_RISCV_CORE_REG(regs.t0)); + case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1): + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)", + reg_off - KVM_REG_RISCV_CORE_REG(regs.s0)); + case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7): + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)", + reg_off - KVM_REG_RISCV_CORE_REG(regs.a0)); + case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11): + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)", + reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2); + case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6): + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)", + reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3); + case KVM_REG_RISCV_CORE_REG(mode): + return "KVM_REG_RISCV_CORE_REG(mode)"; + } + + TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id); + return NULL; +} + +#define RISCV_CSR_GENERAL(csr) \ + "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")" +#define RISCV_CSR_AIA(csr) \ + "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")" + +static const char *general_csr_id_to_str(__u64 reg_off) +{ + /* reg_off is the offset into struct kvm_riscv_csr */ + switch (reg_off) { + case KVM_REG_RISCV_CSR_REG(sstatus): + return RISCV_CSR_GENERAL(sstatus); + case KVM_REG_RISCV_CSR_REG(sie): + return RISCV_CSR_GENERAL(sie); + case KVM_REG_RISCV_CSR_REG(stvec): + return RISCV_CSR_GENERAL(stvec); + case KVM_REG_RISCV_CSR_REG(sscratch): + return RISCV_CSR_GENERAL(sscratch); + case KVM_REG_RISCV_CSR_REG(sepc): + return RISCV_CSR_GENERAL(sepc); + case KVM_REG_RISCV_CSR_REG(scause): + return RISCV_CSR_GENERAL(scause); + case KVM_REG_RISCV_CSR_REG(stval): + return RISCV_CSR_GENERAL(stval); + case KVM_REG_RISCV_CSR_REG(sip): + return RISCV_CSR_GENERAL(sip); + case KVM_REG_RISCV_CSR_REG(satp): + return RISCV_CSR_GENERAL(satp); + case KVM_REG_RISCV_CSR_REG(scounteren): + return RISCV_CSR_GENERAL(scounteren); + } + + TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off); + return NULL; +} + +static const char *aia_csr_id_to_str(__u64 reg_off) +{ + /* reg_off is the offset into struct kvm_riscv_aia_csr */ + switch (reg_off) { + case KVM_REG_RISCV_CSR_AIA_REG(siselect): + return RISCV_CSR_AIA(siselect); + case KVM_REG_RISCV_CSR_AIA_REG(iprio1): + return RISCV_CSR_AIA(iprio1); + case KVM_REG_RISCV_CSR_AIA_REG(iprio2): + return RISCV_CSR_AIA(iprio2); + case KVM_REG_RISCV_CSR_AIA_REG(sieh): + return RISCV_CSR_AIA(sieh); + case KVM_REG_RISCV_CSR_AIA_REG(siph): + return RISCV_CSR_AIA(siph); + case KVM_REG_RISCV_CSR_AIA_REG(iprio1h): + return RISCV_CSR_AIA(iprio1h); + case KVM_REG_RISCV_CSR_AIA_REG(iprio2h): + return RISCV_CSR_AIA(iprio2h); + } + + TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off); + return NULL; +} + +static const char *csr_id_to_str(const char *prefix, __u64 id) +{ + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR); + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK; + + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK; + + switch (reg_subtype) { + case KVM_REG_RISCV_CSR_GENERAL: + return general_csr_id_to_str(reg_off); + case KVM_REG_RISCV_CSR_AIA: + return aia_csr_id_to_str(reg_off); + } + + TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype); + return NULL; +} + +static const char *timer_id_to_str(const char *prefix, __u64 id) +{ + /* reg_off is the offset into struct kvm_riscv_timer */ + __u64 reg_off = 
id & ~(REG_MASK | KVM_REG_RISCV_TIMER); + + switch (reg_off) { + case KVM_REG_RISCV_TIMER_REG(frequency): + return "KVM_REG_RISCV_TIMER_REG(frequency)"; + case KVM_REG_RISCV_TIMER_REG(time): + return "KVM_REG_RISCV_TIMER_REG(time)"; + case KVM_REG_RISCV_TIMER_REG(compare): + return "KVM_REG_RISCV_TIMER_REG(compare)"; + case KVM_REG_RISCV_TIMER_REG(state): + return "KVM_REG_RISCV_TIMER_REG(state)"; + } + + TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id); + return NULL; +} + +static const char *fp_f_id_to_str(const char *prefix, __u64 id) +{ + /* reg_off is the offset into struct __riscv_f_ext_state */ + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F); + + switch (reg_off) { + case KVM_REG_RISCV_FP_F_REG(f[0]) ... + KVM_REG_RISCV_FP_F_REG(f[31]): + return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off); + case KVM_REG_RISCV_FP_F_REG(fcsr): + return "KVM_REG_RISCV_FP_F_REG(fcsr)"; + } + + TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id); + return NULL; +} + +static const char *fp_d_id_to_str(const char *prefix, __u64 id) +{ + /* reg_off is the offset into struct __riscv_d_ext_state */ + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D); + + switch (reg_off) { + case KVM_REG_RISCV_FP_D_REG(f[0]) ... + KVM_REG_RISCV_FP_D_REG(f[31]): + return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off); + case KVM_REG_RISCV_FP_D_REG(fcsr): + return "KVM_REG_RISCV_FP_D_REG(fcsr)"; + } + + TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id); + return NULL; +} + +static const char *isa_ext_id_to_str(__u64 id) +{ + /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */ + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT); + + static const char * const kvm_isa_ext_reg_name[] = { + "KVM_RISCV_ISA_EXT_A", + "KVM_RISCV_ISA_EXT_C", + "KVM_RISCV_ISA_EXT_D", + "KVM_RISCV_ISA_EXT_F", + "KVM_RISCV_ISA_EXT_H", + "KVM_RISCV_ISA_EXT_I", + "KVM_RISCV_ISA_EXT_M", + "KVM_RISCV_ISA_EXT_SVPBMT", + "KVM_RISCV_ISA_EXT_SSTC", + "KVM_RISCV_ISA_EXT_SVINVAL", + "KVM_RISCV_ISA_EXT_ZIHINTPAUSE", + "KVM_RISCV_ISA_EXT_ZICBOM", + "KVM_RISCV_ISA_EXT_ZICBOZ", + "KVM_RISCV_ISA_EXT_ZBB", + "KVM_RISCV_ISA_EXT_SSAIA", + "KVM_RISCV_ISA_EXT_V", + "KVM_RISCV_ISA_EXT_SVNAPOT", + "KVM_RISCV_ISA_EXT_ZBA", + "KVM_RISCV_ISA_EXT_ZBS", + "KVM_RISCV_ISA_EXT_ZICNTR", + "KVM_RISCV_ISA_EXT_ZICSR", + "KVM_RISCV_ISA_EXT_ZIFENCEI", + "KVM_RISCV_ISA_EXT_ZIHPM", + }; + + if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) { + /* + * isa_ext regs would grow regularly with new isa extension added, so + * just show "reg" to indicate a new extension. + */ + return strdup_printf("%lld /* UNKNOWN */", reg_off); + } + + return kvm_isa_ext_reg_name[reg_off]; +} + +static const char *sbi_ext_single_id_to_str(__u64 reg_off) +{ + /* reg_off is KVM_RISCV_SBI_EXT_ID */ + static const char * const kvm_sbi_ext_reg_name[] = { + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL", + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR", + }; + + if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) { + /* + * sbi_ext regs would grow regularly with new sbi extension added, so + * just show "reg" to indicate a new extension. 
+ */ + return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off); + } + + return kvm_sbi_ext_reg_name[reg_off]; +} + +static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off) +{ + if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) { + /* + * sbi_ext regs would grow regularly with new sbi extension added, so + * just show "reg" to indicate a new extension. + */ + return strdup_printf("%lld /* UNKNOWN */", reg_off); + } + + switch (reg_subtype) { + case KVM_REG_RISCV_SBI_MULTI_EN: + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off); + case KVM_REG_RISCV_SBI_MULTI_DIS: + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off); + } + + return NULL; +} + +static const char *sbi_ext_id_to_str(const char *prefix, __u64 id) +{ + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT); + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK; + + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK; + + switch (reg_subtype) { + case KVM_REG_RISCV_SBI_SINGLE: + return sbi_ext_single_id_to_str(reg_off); + case KVM_REG_RISCV_SBI_MULTI_EN: + case KVM_REG_RISCV_SBI_MULTI_DIS: + return sbi_ext_multi_id_to_str(reg_subtype, reg_off); + } + + TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype); + return NULL; +} + +void print_reg(const char *prefix, __u64 id) +{ + const char *reg_size = NULL; + + TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV, + "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id); + + switch (id & KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U32: + reg_size = "KVM_REG_SIZE_U32"; + break; + case KVM_REG_SIZE_U64: + reg_size = "KVM_REG_SIZE_U64"; + break; + case KVM_REG_SIZE_U128: + reg_size = "KVM_REG_SIZE_U128"; + break; + default: + TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx", + prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id); + } + + switch (id & KVM_REG_RISCV_TYPE_MASK) { + case KVM_REG_RISCV_CONFIG: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n", + reg_size, config_id_to_str(id)); + break; + case KVM_REG_RISCV_CORE: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n", + reg_size, core_id_to_str(prefix, id)); + break; + case KVM_REG_RISCV_CSR: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n", + reg_size, csr_id_to_str(prefix, id)); + break; + case KVM_REG_RISCV_TIMER: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n", + reg_size, timer_id_to_str(prefix, id)); + break; + case KVM_REG_RISCV_FP_F: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n", + reg_size, fp_f_id_to_str(prefix, id)); + break; + case KVM_REG_RISCV_FP_D: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n", + reg_size, fp_d_id_to_str(prefix, id)); + break; + case KVM_REG_RISCV_ISA_EXT: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n", + reg_size, isa_ext_id_to_str(id)); + break; + case KVM_REG_RISCV_SBI_EXT: + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n", + reg_size, sbi_ext_id_to_str(prefix, id)); + break; + default: + TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix, + (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id); + } +} + +/* + * The current blessed list was primed with the output of kernel version + * v6.5-rc3 and then later updated with new registers. 
+ */ +static __u64 base_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE 
| KVM_REG_RISCV_CORE_REG(mode),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
+	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
+	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
+	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
+	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
+	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
+};
+
+/*
+ * The skips_set list contains registers that should skip the set test.
+ * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
+ */ +static __u64 base_skips_set[] = { + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state), +}; + +static __u64 h_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H, +}; + +static __u64 zicbom_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM, +}; + +static __u64 zicboz_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ, +}; + +static __u64 svpbmt_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT, +}; + +static __u64 sstc_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC, +}; + +static __u64 svinval_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL, +}; + +static __u64 zihintpause_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE, +}; + +static __u64 zba_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA, +}; + +static __u64 zbb_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB, +}; + +static __u64 zbs_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS, +}; + +static __u64 zicntr_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR, +}; + +static __u64 zicsr_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR, +}; + +static __u64 zifencei_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI, +}; + +static __u64 zihpm_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM, +}; + +static __u64 aia_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA, +}; + +static __u64 fp_f_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]), + KVM_REG_RISCV | 
KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F, +}; + +static __u64 fp_d_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | 
KVM_REG_RISCV_FP_D_REG(f[10]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]), + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]), + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D, +}; + +#define BASE_SUBLIST \ + {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \ + .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),} +#define H_REGS_SUBLIST \ + {"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),} +#define ZICBOM_REGS_SUBLIST \ + {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),} +#define ZICBOZ_REGS_SUBLIST \ + {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),} +#define SVPBMT_REGS_SUBLIST \ + {"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),} +#define SSTC_REGS_SUBLIST \ + {"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),} +#define SVINVAL_REGS_SUBLIST \ + {"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),} +#define ZIHINTPAUSE_REGS_SUBLIST \ + {"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),} +#define ZBA_REGS_SUBLIST \ + {"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),} +#define ZBB_REGS_SUBLIST \ + {"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),} +#define ZBS_REGS_SUBLIST \ + {"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = 
ARRAY_SIZE(zbs_regs),} +#define ZICNTR_REGS_SUBLIST \ + {"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),} +#define ZICSR_REGS_SUBLIST \ + {"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),} +#define ZIFENCEI_REGS_SUBLIST \ + {"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),} +#define ZIHPM_REGS_SUBLIST \ + {"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),} +#define AIA_REGS_SUBLIST \ + {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),} +#define FP_F_REGS_SUBLIST \ + {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \ + .regs_n = ARRAY_SIZE(fp_f_regs),} +#define FP_D_REGS_SUBLIST \ + {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \ + .regs_n = ARRAY_SIZE(fp_d_regs),} + +static struct vcpu_reg_list h_config = { + .sublists = { + BASE_SUBLIST, + H_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zicbom_config = { + .sublists = { + BASE_SUBLIST, + ZICBOM_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zicboz_config = { + .sublists = { + BASE_SUBLIST, + ZICBOZ_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list svpbmt_config = { + .sublists = { + BASE_SUBLIST, + SVPBMT_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list sstc_config = { + .sublists = { + BASE_SUBLIST, + SSTC_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list svinval_config = { + .sublists = { + BASE_SUBLIST, + SVINVAL_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zihintpause_config = { + .sublists = { + BASE_SUBLIST, + ZIHINTPAUSE_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zba_config = { + .sublists = { + BASE_SUBLIST, + ZBA_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zbb_config = { + .sublists = { + BASE_SUBLIST, + ZBB_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zbs_config = { + .sublists = { + BASE_SUBLIST, + ZBS_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zicntr_config = { + .sublists = { + BASE_SUBLIST, + ZICNTR_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zicsr_config = { + .sublists = { + BASE_SUBLIST, + ZICSR_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zifencei_config = { + .sublists = { + BASE_SUBLIST, + ZIFENCEI_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list zihpm_config = { + .sublists = { + BASE_SUBLIST, + ZIHPM_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list aia_config = { + .sublists = { + BASE_SUBLIST, + AIA_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list fp_f_config = { + .sublists = { + BASE_SUBLIST, + FP_F_REGS_SUBLIST, + {0}, + }, +}; + +static struct vcpu_reg_list fp_d_config = { + .sublists = { + BASE_SUBLIST, + FP_D_REGS_SUBLIST, + {0}, + }, +}; + +struct vcpu_reg_list *vcpu_configs[] = { + &h_config, + &zicbom_config, + &zicboz_config, + &svpbmt_config, + &sstc_config, + &svinval_config, + &zihintpause_config, + &zba_config, + &zbb_config, + &zbs_config, + &zicntr_config, + &zicsr_config, + &zifencei_config, + &zihpm_config, + &aia_config, + &fp_f_config, + &fp_d_config, +}; +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); -- cgit From a6b33d009fc1fe80c935f18b714b36c81e1f1400 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Mon, 14 Aug 2023 22:06:36 +0800 Subject: KVM: arm64: Remove unused declarations Commit 53692908b0f5 
("KVM: arm/arm64: vgic: Fix source vcpu issues for GICv2 SGI") removed vgic_v2_set_npie()/vgic_v3_set_npie() but not the declarations. Commit 29eb5a3c57f7 ("KVM: arm64: Handle PtrAuth traps early") left behind kvm_arm_vcpu_ptrauth_trap(), remove it. Commit 2a0c343386ae ("KVM: arm64: Initialize trap registers for protected VMs") declared but never implemented kvm_init_protected_traps() and commit cf5d318865e2 ("arm/arm64: KVM: Turn off vcpus on PSCI shutdown/reboot") declared but never implemented force_vm_exit(). Signed-off-by: Yue Haibing Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230814140636.45988-1-yuehaibing@huawei.com --- arch/arm64/include/asm/kvm_host.h | 6 ------ arch/arm64/kvm/vgic/vgic.h | 2 -- 2 files changed, 8 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d3dd05bbfe23..d1a40fa26369 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -967,8 +967,6 @@ void kvm_arm_resume_guest(struct kvm *kvm); #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__) #endif /* __KVM_NVHE_HYPERVISOR__ */ -void force_vm_exit(const cpumask_t *mask); - int handle_exit(struct kvm_vcpu *vcpu, int exception_index); void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index); @@ -1049,8 +1047,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void) return cpus_have_const_cap(ARM64_SPECTRE_V3A); } -void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); - static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} @@ -1118,8 +1114,6 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm) return false; } -void kvm_init_protected_traps(struct kvm_vcpu *vcpu); - int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature); bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index f9923beedd27..0ab09b0d4440 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -199,7 +199,6 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); -void vgic_v2_set_npie(struct kvm_vcpu *vcpu); int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val); @@ -233,7 +232,6 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); -void vgic_v3_set_npie(struct kvm_vcpu *vcpu); void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_enable(struct kvm_vcpu *vcpu); -- cgit From a1342c8027288e345cc5fd16c6800f9d4eb788ed Mon Sep 17 00:00:00 2001 From: David Matlack Date: Fri, 11 Aug 2023 04:51:14 +0000 Subject: KVM: Rename kvm_arch_flush_remote_tlb() to kvm_arch_flush_remote_tlbs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename kvm_arch_flush_remote_tlb() and the associated macro __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB to kvm_arch_flush_remote_tlbs() and __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS respectively. 
Making the name plural matches kvm_flush_remote_tlbs() and makes it more clear that this function can affect more than one remote TLB. No functional change intended. Signed-off-by: David Matlack Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Gavin Shan Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Shaoqin Huang Acked-by: Sean Christopherson Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230811045127.3308641-2-rananta@google.com --- arch/mips/include/asm/kvm_host.h | 4 ++-- arch/mips/kvm/mips.c | 2 +- arch/x86/include/asm/kvm_host.h | 4 ++-- include/linux/kvm_host.h | 4 ++-- virt/kvm/kvm_main.c | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 04cedf9f8811..9b0ad8f3bf32 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -896,7 +896,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB -int kvm_arch_flush_remote_tlb(struct kvm *kvm); +#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS +int kvm_arch_flush_remote_tlbs(struct kvm *kvm); #endif /* __MIPS_KVM_HOST_H__ */ diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index aa5583a7b05b..4b7bc39a4173 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -981,7 +981,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) } -int kvm_arch_flush_remote_tlb(struct kvm *kvm) +int kvm_arch_flush_remote_tlbs(struct kvm *kvm) { kvm_mips_callbacks->prepare_flush_shadow(kvm); return 1; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 28bd38303d70..a2d3cfc2eb75 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1794,8 +1794,8 @@ static inline struct kvm *kvm_arch_alloc_vm(void) #define __KVM_HAVE_ARCH_VM_FREE void kvm_arch_free_vm(struct kvm *kvm); -#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB -static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) +#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS +static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) { if (kvm_x86_ops.flush_remote_tlbs && !static_call(kvm_x86_flush_remote_tlbs)(kvm)) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9d3ac7720da9..e3f968b38ae9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1479,8 +1479,8 @@ static inline void kvm_arch_free_vm(struct kvm *kvm) } #endif -#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB -static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) +#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS +static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) { return -ENOTSUPP; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index dfbaafbe3a00..70e5479797ac 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -361,7 +361,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm) * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that * barrier here. 
*/ - if (!kvm_arch_flush_remote_tlb(kvm) + if (!kvm_arch_flush_remote_tlbs(kvm) || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) ++kvm->stat.generic.remote_tlb_flush; } -- cgit From cfb0c08e80120928dda1e951718be135abd49bae Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 11 Aug 2023 04:51:15 +0000 Subject: KVM: Declare kvm_arch_flush_remote_tlbs() globally There's no reason for the architectures to declare kvm_arch_flush_remote_tlbs() in their own headers. Hence to avoid this duplication, make the declaration global, leaving the architectures to define only __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS as needed. Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Shaoqin Huang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230811045127.3308641-3-rananta@google.com --- arch/mips/include/asm/kvm_host.h | 1 - include/linux/kvm_host.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 9b0ad8f3bf32..54a85f1d4f2c 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -897,6 +897,5 @@ static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS -int kvm_arch_flush_remote_tlbs(struct kvm *kvm); #endif /* __MIPS_KVM_HOST_H__ */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e3f968b38ae9..ade5d4500c2c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1484,6 +1484,8 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) { return -ENOTSUPP; } +#else +int kvm_arch_flush_remote_tlbs(struct kvm *kvm); #endif #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA -- cgit From 32121c813818a87ba7565b3afce93a9cc3610a22 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 11 Aug 2023 04:51:16 +0000 Subject: KVM: arm64: Use kvm_arch_flush_remote_tlbs() Stop depending on CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL and opt to standardize on kvm_arch_flush_remote_tlbs() since it avoids duplicating the generic TLB stats across architectures that implement their own remote TLB flush. This adds an extra function call to the ARM64 kvm_flush_remote_tlbs() path, but that is a small cost in comparison to flushing remote TLBs. In addition, instead of just incrementing remote_tlb_flush_requests stat, the generic interface would also increment the remote_tlb_flush stat. 
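To illustrate the resulting contract (a sketch assembled from the surrounding hunks, not an additional change): the architecture hook returns 0 once it has flushed the TLBs itself, and any non-zero return makes common code fall back to the IPI-based KVM_REQ_TLB_FLUSH broadcast:

/* arm64 (sketch): flush via hyp and report success. */
int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
	return 0;
}

/* Common code (sketch): count the request, fall back on failure. */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;
	if (!kvm_arch_flush_remote_tlbs(kvm) ||
	    kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}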
Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Shaoqin Huang
Reviewed-by: Gavin Shan
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-4-rananta@google.com
---
 arch/arm64/include/asm/kvm_host.h | 2 ++
 arch/arm64/kvm/Kconfig            | 1 -
 arch/arm64/kvm/mmu.c              | 6 +++---
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d3dd05bbfe23..44ae44b502cf 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1113,6 +1113,8 @@ int __init kvm_set_ipa_limit(void);
 #define __KVM_HAVE_ARCH_VM_ALLOC
 struct kvm *kvm_arch_alloc_vm(void);
 
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+
 static inline bool kvm_vm_is_protected(struct kvm *kvm)
 {
 	return false;
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f531da6b362e..6b730fcfee37 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -25,7 +25,6 @@ menuconfig KVM
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
-	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_XFER_TO_GUEST_WORK
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d3b4feed460c..3494659d939c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -161,15 +161,15 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 }
 
 /**
- * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
+ * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
-void kvm_flush_remote_tlbs(struct kvm *kvm)
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
-	++kvm->stat.generic.remote_tlb_flush_requests;
 	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
+	return 0;
 }
 
 static bool kvm_is_device_pfn(unsigned long pfn)
-- cgit 

From eddd21481011008792f4e647a5244f6e15970abc Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:17 +0000
Subject: KVM: Remove CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

kvm_arch_flush_remote_tlbs() and CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL are two mechanisms to solve the same problem, allowing architecture-specific code to provide a non-IPI implementation of remote TLB flushing.

Dropping CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL allows KVM to standardize all architectures on kvm_arch_flush_remote_tlbs() instead of maintaining two mechanisms.
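In outline, the opt-in pattern that survives this cleanup looks roughly like the sketch below (illustrative only; the authoritative hunks are in the surrounding diffs):

/* arch/<arch>/include/asm/kvm_host.h: architecture opts in. */
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

/* include/linux/kvm_host.h: default for everyone else. */
#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	return -ENOTSUPP;	/* common code falls back to IPIs */
}
#else
int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
#endif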
Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Shaoqin Huang Reviewed-by: Gavin Shan Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230811045127.3308641-5-rananta@google.com --- virt/kvm/Kconfig | 3 --- virt/kvm/kvm_main.c | 2 -- 2 files changed, 5 deletions(-) diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index b74916de5183..484d0873061c 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -62,9 +62,6 @@ config HAVE_KVM_CPU_RELAX_INTERCEPT config KVM_VFIO bool -config HAVE_KVM_ARCH_TLB_FLUSH_ALL - bool - config HAVE_KVM_INVALID_WAKEUPS bool diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 70e5479797ac..d6b050786155 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -345,7 +345,6 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) } EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); -#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL void kvm_flush_remote_tlbs(struct kvm *kvm) { ++kvm->stat.generic.remote_tlb_flush_requests; @@ -366,7 +365,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm) ++kvm->stat.generic.remote_tlb_flush; } EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); -#endif static void kvm_flush_shadow_all(struct kvm *kvm) { -- cgit From d4788996051e3c07fadc6d9b214073fcf78810a8 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Fri, 11 Aug 2023 04:51:18 +0000 Subject: KVM: Allow range-based TLB invalidation from common code Make kvm_flush_remote_tlbs_range() visible in common code and create a default implementation that just invalidates the whole TLB. This paves the way for several future features/cleanups: - Introduction of range-based TLBI on ARM. - Eliminating kvm_arch_flush_remote_tlbs_memslot() - Moving the KVM/x86 TDP MMU to common code. No functional change intended. 
Signed-off-by: David Matlack Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Gavin Shan Reviewed-by: Shaoqin Huang Reviewed-by: Anup Patel Acked-by: Sean Christopherson Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230811045127.3308641-6-rananta@google.com --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/mmu/mmu.c | 12 ++++-------- arch/x86/kvm/mmu/mmu_internal.h | 3 --- include/linux/kvm_host.h | 11 +++++++++++ virt/kvm/kvm_main.c | 13 +++++++++++++ 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a2d3cfc2eb75..b547d17c58f6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1804,6 +1804,8 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) return -ENOTSUPP; } +#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE + #define kvm_arch_pmi_in_guest(vcpu) \ ((vcpu) && (vcpu)->arch.handling_intr_from_guest) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ec169f5c7dce..46ae672668e1 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -278,16 +278,12 @@ static inline bool kvm_available_flush_remote_tlbs_range(void) return kvm_x86_ops.flush_remote_tlbs_range; } -void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, - gfn_t nr_pages) +int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) { - int ret = -EOPNOTSUPP; + if (!kvm_x86_ops.flush_remote_tlbs_range) + return -EOPNOTSUPP; - if (kvm_x86_ops.flush_remote_tlbs_range) - ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn, - nr_pages); - if (ret) - kvm_flush_remote_tlbs(kvm); + return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages); } static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index d39af5639ce9..86cb83bb3480 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -170,9 +170,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level); -void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, - gfn_t nr_pages); - /* Flush the given page (huge or not) of guest memory. 
*/
 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
 {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ade5d4500c2c..89d2614e4b7a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1359,6 +1359,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1488,6 +1489,16 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
 #endif
 
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+						   gfn_t gfn, u64 nr_pages)
+{
+	return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+#endif
+
 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d6b050786155..26e91000f579 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -366,6 +366,19 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
+{
+	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
+		return;
+
+	/*
+	 * Fall back to flushing the entire TLB if the architecture's
+	 * range-based TLB invalidation is unsupported or can't be
+	 * performed for whatever reason.
+	 */
+	kvm_flush_remote_tlbs(kvm);
+}
+
 static void kvm_flush_shadow_all(struct kvm *kvm)
 {
 	kvm_arch_flush_shadow_all(kvm);
-- cgit 

From 619b5072443c05cf18c31b2c0320cdb42396d411 Mon Sep 17 00:00:00 2001
From: David Matlack
Date: Fri, 11 Aug 2023 04:51:19 +0000
Subject: KVM: Move kvm_arch_flush_remote_tlbs_memslot() to common code

Move kvm_arch_flush_remote_tlbs_memslot() to common code and drop "arch_" from the name. kvm_arch_flush_remote_tlbs_memslot() is just a range-based TLB invalidation where the range is defined by the memslot. Now that kvm_flush_remote_tlbs_range() can be called from common code we can just use that and drop a bunch of duplicate code from the arch directories.

Note this adds a lockdep assertion for slots_lock being held when calling kvm_flush_remote_tlbs_memslot(), which was previously only asserted on x86. MIPS has calls to kvm_flush_remote_tlbs_memslot(), but they all hold the slots_lock, so the lockdep assertion continues to hold true.

Also drop the CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT ifdef gating kvm_flush_remote_tlbs_memslot(), since it is no longer necessary.
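A hypothetical call site, sketched to show the locking contract (the flush flag and surrounding code are illustrative, not from the patch):

/* Callers must hold slots_lock; the helper asserts it via lockdep. */
mutex_lock(&kvm->slots_lock);
/* ... harvest dirty bits, decide whether a flush is needed ... */
if (flush)
	kvm_flush_remote_tlbs_memslot(kvm, memslot);
mutex_unlock(&kvm->slots_lock);

Internally the helper simply delegates to the range flush: lockdep_assert_held(&kvm->slots_lock) followed by kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages).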
Signed-off-by: David Matlack Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Gavin Shan Reviewed-by: Shaoqin Huang Acked-by: Anup Patel Acked-by: Sean Christopherson Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230811045127.3308641-7-rananta@google.com --- arch/arm64/kvm/arm.c | 6 ------ arch/mips/kvm/mips.c | 10 ++-------- arch/riscv/kvm/mmu.c | 6 ------ arch/x86/kvm/mmu/mmu.c | 16 +--------------- arch/x86/kvm/x86.c | 2 +- include/linux/kvm_host.h | 7 +++---- virt/kvm/kvm_main.c | 18 ++++++++++++++++-- 7 files changed, 23 insertions(+), 42 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 72dc53a75d1c..fd2af63d788d 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1532,12 +1532,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) } -void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - const struct kvm_memory_slot *memslot) -{ - kvm_flush_remote_tlbs(kvm); -} - static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr) { diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 4b7bc39a4173..231ac052b506 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -199,7 +199,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, /* Flush slot from GPA */ kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, slot->base_gfn + slot->npages - 1); - kvm_arch_flush_remote_tlbs_memslot(kvm, slot); + kvm_flush_remote_tlbs_memslot(kvm, slot); spin_unlock(&kvm->mmu_lock); } @@ -235,7 +235,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, new->base_gfn + new->npages - 1); if (needs_flush) - kvm_arch_flush_remote_tlbs_memslot(kvm, new); + kvm_flush_remote_tlbs_memslot(kvm, new); spin_unlock(&kvm->mmu_lock); } } @@ -987,12 +987,6 @@ int kvm_arch_flush_remote_tlbs(struct kvm *kvm) return 1; } -void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - const struct kvm_memory_slot *memslot) -{ - kvm_flush_remote_tlbs(kvm); -} - int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { int r; diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index f2eb47925806..97e129620686 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -406,12 +406,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { } -void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - const struct kvm_memory_slot *memslot) -{ - kvm_flush_remote_tlbs(kvm); -} - void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free) { } diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 46ae672668e1..dbf3c6c2316c 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -6666,7 +6666,7 @@ static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, */ if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true)) - kvm_arch_flush_remote_tlbs_memslot(kvm, slot); + kvm_flush_remote_tlbs_memslot(kvm, slot); } void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, @@ -6685,20 +6685,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, } } -void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - const struct kvm_memory_slot *memslot) -{ - /* - * All current use cases for flushing the TLBs for a specific memslot - * related to dirty logging, and many do the TLB flush out of mmu_lock. 
- * The interaction between the various operations on memslot must be - * serialized by slots_locks to ensure the TLB flush from one operation - * is observed by any other operation on the same memslot. - */ - lockdep_assert_held(&kvm->slots_lock); - kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages); -} - void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, const struct kvm_memory_slot *memslot) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a6b9bea62fb8..faeb2e307b36 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12751,7 +12751,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, * See is_writable_pte() for more details (the case involving * access-tracked SPTEs is particularly relevant). */ - kvm_arch_flush_remote_tlbs_memslot(kvm, new); + kvm_flush_remote_tlbs_memslot(kvm, new); } } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 89d2614e4b7a..394db2ce11e2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1360,6 +1360,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); +void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot); #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); @@ -1388,10 +1390,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, unsigned long mask); void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); -#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT -void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - const struct kvm_memory_slot *memslot); -#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ +#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty, struct kvm_memory_slot **memslot); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 26e91000f579..5d4d2e051aa0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -379,6 +379,20 @@ void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) kvm_flush_remote_tlbs(kvm); } +void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + /* + * All current use cases for flushing the TLBs for a specific memslot + * are related to dirty logging, and many do the TLB flush out of + * mmu_lock. The interaction between the various operations on memslot + * must be serialized by slots_locks to ensure the TLB flush from one + * operation is observed by any other operation on the same memslot. 
+ */
+	lockdep_assert_held(&kvm->slots_lock);
+	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
+}
+
 static void kvm_flush_shadow_all(struct kvm *kvm)
 {
 	kvm_arch_flush_shadow_all(kvm);
@@ -2191,7 +2205,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
 	}
 
 	if (flush)
-		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+		kvm_flush_remote_tlbs_memslot(kvm, memslot);
 
 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
 		return -EFAULT;
@@ -2308,7 +2322,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
 	KVM_MMU_UNLOCK(kvm);
 
 	if (flush)
-		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+		kvm_flush_remote_tlbs_memslot(kvm, memslot);
 
 	return 0;
 }
-- cgit 

From 360839027a6e4c022e8cbaa373dd747185f1e0a5 Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:20 +0000
Subject: arm64: tlb: Refactor the core flush algorithm of __flush_tlb_range

Currently, the core TLB flush functionality of __flush_tlb_range() hardcodes vae1is (and variants) for the flush operation. In the upcoming patches, the KVM code reuses this core algorithm with ipas2e1is for range based TLB invalidations based on the IPA. Hence, extract the core flush functionality of __flush_tlb_range() into its own macro that accepts an 'op' argument to pass any TLBI operation, such that other callers (KVM) can benefit.

No functional changes intended.

Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Catalin Marinas
Reviewed-by: Gavin Shan
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-8-rananta@google.com
---
 arch/arm64/include/asm/tlbflush.h | 121 +++++++++++++++++++++-----------------
 1 file changed, 68 insertions(+), 53 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 412a3b9a3c25..b9475a852d5b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -278,14 +278,74 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 */
 #define MAX_TLBI_OPS	PTRS_PER_PTE
 
+/*
+ * __flush_tlb_range_op - Perform TLBI operation upon a range
+ *
+ * @op:	TLBI instruction that operates on a range (has 'r' prefix)
+ * @start:	The start address of the range
+ * @pages:	Range as the number of pages from 'start'
+ * @stride:	Flush granularity
+ * @asid:	The ASID of the task (0 for IPA instructions)
+ * @tlb_level:	Translation Table level hint, if known
+ * @tlbi_user:	If 'true', call an additional __tlbi_user()
+ *		(typically for user ASIDs). 'false' for IPA instructions
+ *
+ * When the CPU does not support TLB range operations, flush the TLB
+ * entries one by one at the granularity of 'stride'. If the TLB
+ * range ops are supported, then:
+ *
+ * 1. If 'pages' is odd, flush the first page through non-range
+ *    operations;
+ *
+ * 2. For remaining pages: the minimum range granularity is decided
+ *    by 'scale', so multiple range TLBI operations may be required.
+ *    Start from scale = 0, flush the corresponding number of pages
+ *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
+ *    until no pages left.
+ *
+ * Note that certain ranges can be represented by either num = 31 and
+ * scale or num = 0 and scale + 1. The loop below favours the latter
+ * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+ */
+#define __flush_tlb_range_op(op, start, pages, stride,			\
+				asid, tlb_level, tlbi_user)		\
+do {									\
+	int num = 0;							\
+	int scale = 0;							\
+	unsigned long addr;						\
+									\
+	while (pages > 0) {						\
+		if (!system_supports_tlb_range() ||			\
+		    pages % 2 == 1) {					\
+			addr = __TLBI_VADDR(start, asid);		\
+			__tlbi_level(op, addr, tlb_level);		\
+			if (tlbi_user)					\
+				__tlbi_user_level(op, addr, tlb_level);	\
+			start += stride;				\
+			pages -= stride >> PAGE_SHIFT;			\
+			continue;					\
+		}							\
+									\
+		num = __TLBI_RANGE_NUM(pages, scale);			\
+		if (num >= 0) {						\
+			addr = __TLBI_VADDR_RANGE(start, asid, scale,	\
+						  num, tlb_level);	\
+			__tlbi(r##op, addr);				\
+			if (tlbi_user)					\
+				__tlbi_user(r##op, addr);		\
+			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
+			pages -= __TLBI_RANGE_PAGES(num, scale);	\
+		}							\
+		scale++;						\
+	}								\
+} while (0)
+
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
 				     int tlb_level)
 {
-	int num = 0;
-	int scale = 0;
-	unsigned long asid, addr, pages;
+	unsigned long asid, pages;
 
 	start = round_down(start, stride);
 	end = round_up(end, stride);
@@ -307,56 +367,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	asid = ASID(vma->vm_mm);
 
-	/*
-	 * When the CPU does not support TLB range operations, flush the TLB
-	 * entries one by one at the granularity of 'stride'. If the TLB
-	 * range ops are supported, then:
-	 *
-	 * 1. If 'pages' is odd, flush the first page through non-range
-	 *    operations;
-	 *
-	 * 2. For remaining pages: the minimum range granularity is decided
-	 *    by 'scale', so multiple range TLBI operations may be required.
-	 *    Start from scale = 0, flush the corresponding number of pages
-	 *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
-	 *    until no pages left.
-	 *
-	 * Note that certain ranges can be represented by either num = 31 and
-	 * scale or num = 0 and scale + 1. The loop below favours the latter
-	 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
-	 */
-	while (pages > 0) {
-		if (!system_supports_tlb_range() ||
-		    pages % 2 == 1) {
-			addr = __TLBI_VADDR(start, asid);
-			if (last_level) {
-				__tlbi_level(vale1is, addr, tlb_level);
-				__tlbi_user_level(vale1is, addr, tlb_level);
-			} else {
-				__tlbi_level(vae1is, addr, tlb_level);
-				__tlbi_user_level(vae1is, addr, tlb_level);
-			}
-			start += stride;
-			pages -= stride >> PAGE_SHIFT;
-			continue;
-		}
-
-		num = __TLBI_RANGE_NUM(pages, scale);
-		if (num >= 0) {
-			addr = __TLBI_VADDR_RANGE(start, asid, scale,
-						  num, tlb_level);
-			if (last_level) {
-				__tlbi(rvale1is, addr);
-				__tlbi_user(rvale1is, addr);
-			} else {
-				__tlbi(rvae1is, addr);
-				__tlbi_user(rvae1is, addr);
-			}
-			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
-			pages -= __TLBI_RANGE_PAGES(num, scale);
-		}
-		scale++;
-	}
+	if (last_level)
+		__flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
+	else
+		__flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
+
 	dsb(ish);
 }
-- cgit 

From 4d73a9c13aaa78b149ac04b02f0ee7973f233bfa Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:21 +0000
Subject: arm64: tlb: Implement __flush_s2_tlb_range_op()

Define __flush_s2_tlb_range_op(), as a wrapper over __flush_tlb_range_op(), for stage-2 specific range-based TLBI operations that don't necessarily have to deal with 'asid' and 'tlbi_user' arguments.
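As a worked example (illustrative only; it assumes FEAT_TLBIRANGE and a 4KiB granule, and derives the encoding from the (num+1)*2^(5*scale+1) relation documented above), flushing a 4MiB stage-2 range means pages = 1024:

/*
 * Sketch: one range-based invalidation of 1024 pages of IPA space.
 */
__flush_s2_tlb_range_op(ipas2e1is, start, 1024, PAGE_SIZE, 0);

/*
 * This expands to __flush_tlb_range_op(ipas2e1is, start, 1024,
 * PAGE_SIZE, 0, 0, false): asid is 0 and no __tlbi_user() is issued,
 * since IPA invalidations are not ASID-tagged. 1024 is even, so the
 * loop looks for a scale/num pair covering (num + 1) * 2^(5*scale + 1)
 * pages per operation: scale = 0 cannot encode 1024 pages (num is
 * capped at 30), but scale = 1 with num = 15 covers exactly
 * (15 + 1) * 2^6 = 1024 pages, so a single rIPAS2E1IS op suffices.
 */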
Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-9-rananta@google.com
---
 arch/arm64/include/asm/tlbflush.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index b9475a852d5b..93f4b397f9a1 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -340,6 +340,9 @@ do {									\
 	}								\
 } while (0)
 
+#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
+	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
+
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
-- cgit

From 6354d15052ec88273c24beae4c99e31c3d3889b6 Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:22 +0000
Subject: KVM: arm64: Implement __kvm_tlb_flush_vmid_range()

Define __kvm_tlb_flush_vmid_range() (for VHE and nVHE) to flush a range
of stage-2 page-table entries by IPA in one go. If the system supports
FEAT_TLBIRANGE, the subsequent patches will replace global TLBI
operations such as vmalls12e1is in the map, unmap, and dirty-logging
paths with ripas2e1is.

Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Gavin Shan
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-10-rananta@google.com
---
 arch/arm64/include/asm/kvm_asm.h   |  3 +++
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 11 +++++++++++
 arch/arm64/kvm/hyp/nvhe/tlb.c      | 30 ++++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/vhe/tlb.c       | 28 ++++++++++++++++++++++++++++
 4 files changed, 72 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 7d170aaa2db4..2c27cb8cf442 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -70,6 +70,7 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
 	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
@@ -229,6 +230,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
 extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 					 phys_addr_t ipa,
 					 int level);
+extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+					phys_addr_t start, unsigned long pages);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a169c619db60..857d9bc04fd4 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -135,6 +135,16 @@ static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctx
 	__kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
 }
 
+static void
+handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+	DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
+	DECLARE_REG(unsigned long, pages, host_ctxt, 3);
+
+	__kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
+}
+
 static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -327,6 +337,7 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
+	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
 	HANDLE_FUNC(__vgic_v3_read_vmcr),
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index b9991bbd8e3f..1b265713d6be 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -182,6 +182,36 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	__tlb_switch_to_host(&cxt);
 }
 
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+				phys_addr_t start, unsigned long pages)
+{
+	struct tlb_inv_context cxt;
+	unsigned long stride;
+
+	/*
+	 * Since the range of addresses may not be mapped at
+	 * the same level, assume the worst case and use a
+	 * stride of PAGE_SIZE.
+	 */
+	stride = PAGE_SIZE;
+	start = round_down(start, stride);
+
+	/* Switch to requested VMID */
+	__tlb_switch_to_guest(mmu, &cxt, false);
+
+	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+	dsb(ish);
+	__tlbi(vmalle1is);
+	dsb(ish);
+	isb();
+
+	/* See the comment in __kvm_tlb_flush_vmid_ipa() */
+	if (icache_is_vpipt())
+		icache_inval_all_pou();
+
+	__tlb_switch_to_host(&cxt);
+}
+
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index e69da550cdc5..46bd43f61d76 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -143,6 +143,34 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	__tlb_switch_to_host(&cxt);
 }
 
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+				phys_addr_t start, unsigned long pages)
+{
+	struct tlb_inv_context cxt;
+	unsigned long stride;
+
+	/*
+	 * Since the range of addresses may not be mapped at
+	 * the same level, assume the worst case and use a
+	 * stride of PAGE_SIZE.
+	 */
+	stride = PAGE_SIZE;
+	start = round_down(start, stride);
+
+	dsb(ishst);
+
+	/* Switch to requested VMID */
+	__tlb_switch_to_guest(mmu, &cxt);
+
+	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+	dsb(ish);
+	__tlbi(vmalle1is);
+	dsb(ish);
+	isb();
+
+	__tlb_switch_to_host(&cxt);
+}
+
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
-- cgit

From 117940aa6e5f8308f1529e1313660980f1dae771 Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:23 +0000
Subject: KVM: arm64: Define kvm_tlb_flush_vmid_range()

Implement the helper kvm_tlb_flush_vmid_range() that acts as a wrapper
for range-based TLB invalidations. For the given VMID, use the
range-based TLBI instructions to do the job, or fall back to
invalidating all the TLB entries.

Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Gavin Shan
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-11-rananta@google.com
---
 arch/arm64/include/asm/kvm_pgtable.h | 10 ++++++++++
 arch/arm64/kvm/hyp/pgtable.c         | 20 ++++++++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 929d355eae0a..d3e354bb8351 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -746,4 +746,14 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
 * kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
+
+/**
+ * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
+ *
+ * @mmu:	Stage-2 KVM MMU struct
+ * @addr:	The base intermediate physical address (IPA) from which to invalidate
+ * @size:	Size of the range from the base to invalidate
+ */
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+				phys_addr_t addr, size_t size);
 #endif	/* __ARM64_KVM_PGTABLE_H__ */
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f7a93ef29250..fb58a229b744 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -670,6 +670,26 @@ static bool stage2_has_fwb(struct kvm_pgtable *pgt)
 	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
 }
 
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+				phys_addr_t addr, size_t size)
+{
+	unsigned long pages, inval_pages;
+
+	if (!system_supports_tlb_range()) {
+		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+		return;
+	}
+
+	pages = size >> PAGE_SHIFT;
+	while (pages > 0) {
+		inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
+		kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
+
+		addr += inval_pages << PAGE_SHIFT;
+		pages -= inval_pages;
+	}
+}
+
 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
 
 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
-- cgit

From c42b6f0b1cde4dd19e6b5dd052e67b87cc331b01 Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:24 +0000
Subject: KVM: arm64: Implement kvm_arch_flush_remote_tlbs_range()

Implement kvm_arch_flush_remote_tlbs_range() for arm64 to invalidate
the given range in the TLB.

Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Gavin Shan
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-12-rananta@google.com
---
 arch/arm64/include/asm/kvm_host.h | 2 ++
 arch/arm64/kvm/mmu.c              | 8 ++++++++
 2 files changed, 10 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 44ae44b502cf..f623b989ddd1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1115,6 +1115,8 @@ struct kvm *kvm_arch_alloc_vm(void);
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
 
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+
 static inline bool kvm_vm_is_protected(struct kvm *kvm)
 {
 	return false;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 3494659d939c..c73cec797519 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -172,6 +172,14 @@ int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 	return 0;
 }
 
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+				      gfn_t gfn, u64 nr_pages)
+{
+	kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
+				 gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+	return 0;
+}
+
 static bool kvm_is_device_pfn(unsigned long pfn)
 {
 	return !pfn_is_map_memory(pfn);
-- cgit

From 3756b6f2bb3a242fef0867b39a23607f5aeca138 Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:25 +0000
Subject: KVM: arm64: Flush only the memslot after write-protect

After write-protecting the region, KVM currently invalidates all TLB
entries using kvm_flush_remote_tlbs(). Instead, scope the invalidation
to the targeted memslot. If supported, the architecture uses the
range-based TLBI instructions to flush the memslot; otherwise, it falls
back to flushing all of the TLBs.
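
For a sense of scale (worked arithmetic, assuming 4K pages): the
chunking in kvm_tlb_flush_vmid_range() above is bounded by
MAX_TLBI_RANGE_PAGES, i.e. __TLBI_RANGE_PAGES(31, 3) =
(31 + 1) << (5 * 3 + 1) = 2^21 pages, so even a very large memslot
needs only a handful of hypercalls:

	/* Sketch: hypercalls needed to range-flush an 8GB memslot */
	unsigned long pages = (8UL << 30) >> PAGE_SHIFT;	/* 2^21 with 4K pages */
	unsigned long calls = DIV_ROUND_UP(pages, MAX_TLBI_RANGE_PAGES); /* == 1 */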
Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Gavin Shan
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-13-rananta@google.com
---
 arch/arm64/kvm/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c73cec797519..b16aff3f65f6 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1083,7 +1083,7 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	write_lock(&kvm->mmu_lock);
 	stage2_wp_range(&kvm->arch.mmu, start, end);
 	write_unlock(&kvm->mmu_lock);
-	kvm_flush_remote_tlbs(kvm);
+	kvm_flush_remote_tlbs_memslot(kvm, memslot);
 }
 
 /**
-- cgit

From defc8cc7abf0fcee8d73e440ee02827348d060e0 Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:26 +0000
Subject: KVM: arm64: Invalidate the table entries upon a range

Currently, during operations such as a hugepage collapse, KVM flushes
the entire VM's context using the 'vmalls12e1is' TLBI operation.
Specifically, if the VM is faulting on many hugepages (say after
dirty-logging), this creates a performance penalty for the guest whose
pages were already faulted in earlier, as their TLB entries are
needlessly discarded and must be refilled.

Instead, leverage kvm_tlb_flush_vmid_range() for table entries. If the
system supports it, only the required range will be flushed. Otherwise,
it falls back to the previous mechanism.

Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Gavin Shan
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-14-rananta@google.com
---
 arch/arm64/kvm/hyp/pgtable.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index fb58a229b744..1a4af37fd8c0 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -806,7 +806,8 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
 	 * evicted pte value (if any).
 	 */
 	if (kvm_pte_table(ctx->old, ctx->level))
-		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+		kvm_tlb_flush_vmid_range(mmu, ctx->addr,
+					kvm_granule_size(ctx->level));
 	else if (kvm_pte_valid(ctx->old))
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
 			     ctx->addr, ctx->level);
-- cgit

From 7657ea920c54218f123ddc1b572821695b669c13 Mon Sep 17 00:00:00 2001
From: Raghavendra Rao Ananta
Date: Fri, 11 Aug 2023 04:51:27 +0000
Subject: KVM: arm64: Use TLBI range-based instructions for unmap

The current implementation of the stage-2 unmap walker traverses the
given range and, as a part of break-before-make, performs TLB
invalidations with a DSB for every PTE. Repeated at scale, this
combination can cause a performance bottleneck on some systems.

Hence, if the system supports FEAT_TLBIRANGE, defer the TLB
invalidations until the entire walk is finished, and then use
range-based instructions to invalidate the TLBs in one go. Condition
deferred TLB invalidation on the system supporting FWB, as the
optimization is entirely pointless when the unmap walker needs to
perform CMOs.

Rename stage2_put_pte() to stage2_unmap_put_pte() as the function now
serves the stage-2 unmap walker specifically, rather than being
generic.
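
The resulting shape of the unmap path is roughly the following
(sketch; it condenses the hunks in the diff below):

	/*
	 * Walk clears the PTEs but, with FEAT_TLBIRANGE + FWB,
	 * skips the per-PTE TLBI...
	 */
	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	/* ...and a single range-based invalidation covers the lot */
	if (stage2_unmap_defer_tlb_flush(pgt))
		kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);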
Signed-off-by: Raghavendra Rao Ananta
Reviewed-by: Shaoqin Huang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230811045127.3308641-15-rananta@google.com
---
 arch/arm64/kvm/hyp/pgtable.c | 40 +++++++++++++++++++++++++++++++++-------
 1 file changed, 33 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 1a4af37fd8c0..f155b8c9e98c 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -831,16 +831,36 @@ static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t n
 	smp_store_release(ctx->ptep, new);
 }
 
-static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
-			   struct kvm_pgtable_mm_ops *mm_ops)
+static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
 {
 	/*
-	 * Clear the existing PTE, and perform break-before-make with
-	 * TLB maintenance if it was valid.
+	 * If FEAT_TLBIRANGE is implemented, defer the individual
+	 * TLB invalidations until the entire walk is finished, and
+	 * then use the range-based TLBI instructions to do the
+	 * invalidations. Condition deferred TLB invalidation on the
+	 * system supporting FWB as the optimization is entirely
+	 * pointless when the unmap walker needs to perform CMOs.
+	 */
+	return system_supports_tlb_range() && stage2_has_fwb(pgt);
+}
+
+static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
+				struct kvm_s2_mmu *mmu,
+				struct kvm_pgtable_mm_ops *mm_ops)
+{
+	struct kvm_pgtable *pgt = ctx->arg;
+
+	/*
+	 * Clear the existing PTE, and perform break-before-make if it was
+	 * valid. Depending on the system support, defer the TLB maintenance
+	 * for the same until the entire unmap walk is completed.
 	 */
 	if (kvm_pte_valid(ctx->old)) {
 		kvm_clear_pte(ctx->ptep);
-		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+
+		if (!stage2_unmap_defer_tlb_flush(pgt))
+			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+				     ctx->addr, ctx->level);
 	}
 
 	mm_ops->put_page(ctx->ptep);
@@ -1098,7 +1118,7 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	 * block entry and rely on the remaining portions being faulted
 	 * back lazily.
 	 */
-	stage2_put_pte(ctx, mmu, mm_ops);
+	stage2_unmap_put_pte(ctx, mmu, mm_ops);
 
 	if (need_flush && mm_ops->dcache_clean_inval_poc)
 		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
@@ -1112,13 +1132,19 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
 
 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
 {
+	int ret;
 	struct kvm_pgtable_walker walker = {
 		.cb	= stage2_unmap_walker,
 		.arg	= pgt,
 		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
 	};
 
-	return kvm_pgtable_walk(pgt, addr, size, &walker);
+	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+	if (stage2_unmap_defer_tlb_flush(pgt))
+		/* Perform the deferred TLB invalidations */
+		kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
+
+	return ret;
 }
 
 struct stage2_attr_data {
-- cgit

From 21f74a51373791732baa0d672a604afa76d5718d Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 15 Aug 2023 19:38:35 +0100
Subject: arm64: Add missing VA CMO encodings

Add the missing VA-based CMO encodings.
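
A plausible consumer of these defines, sketched with helpers used
elsewhere in KVM (the surrounding handler context here is
hypothetical), is matching a trapped cache maintenance instruction by
its encoding:

	/* Sketch: recognise a trapped DC CIVAC, Xt */
	u32 insn = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	if (insn == SYS_DC_CIVAC) {
		/* clean+invalidate by VA to PoC was trapped */
	}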
Reviewed-by: Eric Auger Reviewed-by: Miguel Luis Acked-by: Catalin Marinas Reviewed-by: Zenghui Yu Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-2-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b481935e9314..85447e68951a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -124,6 +124,32 @@ #define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) #define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) +#define SYS_IC_IALLUIS sys_insn(1, 0, 7, 1, 0) +#define SYS_IC_IALLU sys_insn(1, 0, 7, 5, 0) +#define SYS_IC_IVAU sys_insn(1, 3, 7, 5, 1) + +#define SYS_DC_IVAC sys_insn(1, 0, 7, 6, 1) +#define SYS_DC_IGVAC sys_insn(1, 0, 7, 6, 3) +#define SYS_DC_IGDVAC sys_insn(1, 0, 7, 6, 5) + +#define SYS_DC_CVAC sys_insn(1, 3, 7, 10, 1) +#define SYS_DC_CGVAC sys_insn(1, 3, 7, 10, 3) +#define SYS_DC_CGDVAC sys_insn(1, 3, 7, 10, 5) + +#define SYS_DC_CVAU sys_insn(1, 3, 7, 11, 1) + +#define SYS_DC_CVAP sys_insn(1, 3, 7, 12, 1) +#define SYS_DC_CGVAP sys_insn(1, 3, 7, 12, 3) +#define SYS_DC_CGDVAP sys_insn(1, 3, 7, 12, 5) + +#define SYS_DC_CVADP sys_insn(1, 3, 7, 13, 1) +#define SYS_DC_CGVADP sys_insn(1, 3, 7, 13, 3) +#define SYS_DC_CGDVADP sys_insn(1, 3, 7, 13, 5) + +#define SYS_DC_CIVAC sys_insn(1, 3, 7, 14, 1) +#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3) +#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5) + /* * Automatically generated definitions for system registers, the * manual encodings below are in the process of being converted to -- cgit From 464f2164da7e4cb50faec9d56226b22c9b36cdda Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:36 +0100 Subject: arm64: Add missing ERX*_EL1 encodings We only describe a few of the ERX*_EL1 registers. Add the missing ones (ERXPFGF_EL1, ERXPFGCTL_EL1, ERXPFGCDN_EL1, ERXMISC2_EL1 and ERXMISC3_EL1). Reviewed-by: Eric Auger Reviewed-by: Miguel Luis Acked-by: Catalin Marinas Reviewed-by: Zenghui Yu Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-3-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 85447e68951a..ed2739897859 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -229,8 +229,13 @@ #define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1) #define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2) #define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3) +#define SYS_ERXPFGF_EL1 sys_reg(3, 0, 5, 4, 4) +#define SYS_ERXPFGCTL_EL1 sys_reg(3, 0, 5, 4, 5) +#define SYS_ERXPFGCDN_EL1 sys_reg(3, 0, 5, 4, 6) #define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0) #define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1) +#define SYS_ERXMISC2_EL1 sys_reg(3, 0, 5, 5, 2) +#define SYS_ERXMISC3_EL1 sys_reg(3, 0, 5, 5, 3) #define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) #define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) -- cgit From 6ddea24dfd59f0fc78a87df54d428e3a6cf3e11f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:37 +0100 Subject: arm64: Add missing DC ZVA/GVA/GZVA encodings Add the missing DC *VA encodings. 
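
For reference, sys_insn() packs its operands exactly like sys_reg();
a worked example for the first define above, using the
Op0/Op1/CRn/CRm/Op2 shift values from asm/sysreg.h:

	/*
	 * SYS_DC_ZVA = sys_insn(1, 3, 7, 4, 1)
	 *            = (1 << 19) | (3 << 16) | (7 << 12) | (4 << 8) | (1 << 5)
	 *            = 0xb7420
	 */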
Reviewed-by: Eric Auger Reviewed-by: Miguel Luis Acked-by: Catalin Marinas Reviewed-by: Zenghui Yu Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-4-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index ed2739897859..5084add86897 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -150,6 +150,11 @@ #define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3) #define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5) +/* Data cache zero operations */ +#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1) +#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3) +#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4) + /* * Automatically generated definitions for system registers, the * manual encodings below are in the process of being converted to -- cgit From fb1926cccd70a5032448968dfd639187cd894cb7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:38 +0100 Subject: arm64: Add TLBI operation encodings Add all the TLBI encodings that are usable from Non-Secure. Reviewed-by: Eric Auger Acked-by: Catalin Marinas Reviewed-by: Miguel Luis Reviewed-by: Zenghui Yu Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-5-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 128 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 5084add86897..72e18480ce62 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -514,6 +514,134 @@ #define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0) +/* TLBI instructions */ +#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0) +#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1) +#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2) +#define OP_TLBI_VAAE1OS sys_insn(1, 0, 8, 1, 3) +#define OP_TLBI_VALE1OS sys_insn(1, 0, 8, 1, 5) +#define OP_TLBI_VAALE1OS sys_insn(1, 0, 8, 1, 7) +#define OP_TLBI_RVAE1IS sys_insn(1, 0, 8, 2, 1) +#define OP_TLBI_RVAAE1IS sys_insn(1, 0, 8, 2, 3) +#define OP_TLBI_RVALE1IS sys_insn(1, 0, 8, 2, 5) +#define OP_TLBI_RVAALE1IS sys_insn(1, 0, 8, 2, 7) +#define OP_TLBI_VMALLE1IS sys_insn(1, 0, 8, 3, 0) +#define OP_TLBI_VAE1IS sys_insn(1, 0, 8, 3, 1) +#define OP_TLBI_ASIDE1IS sys_insn(1, 0, 8, 3, 2) +#define OP_TLBI_VAAE1IS sys_insn(1, 0, 8, 3, 3) +#define OP_TLBI_VALE1IS sys_insn(1, 0, 8, 3, 5) +#define OP_TLBI_VAALE1IS sys_insn(1, 0, 8, 3, 7) +#define OP_TLBI_RVAE1OS sys_insn(1, 0, 8, 5, 1) +#define OP_TLBI_RVAAE1OS sys_insn(1, 0, 8, 5, 3) +#define OP_TLBI_RVALE1OS sys_insn(1, 0, 8, 5, 5) +#define OP_TLBI_RVAALE1OS sys_insn(1, 0, 8, 5, 7) +#define OP_TLBI_RVAE1 sys_insn(1, 0, 8, 6, 1) +#define OP_TLBI_RVAAE1 sys_insn(1, 0, 8, 6, 3) +#define OP_TLBI_RVALE1 sys_insn(1, 0, 8, 6, 5) +#define OP_TLBI_RVAALE1 sys_insn(1, 0, 8, 6, 7) +#define OP_TLBI_VMALLE1 sys_insn(1, 0, 8, 7, 0) +#define OP_TLBI_VAE1 sys_insn(1, 0, 8, 7, 1) +#define OP_TLBI_ASIDE1 sys_insn(1, 0, 8, 7, 2) +#define OP_TLBI_VAAE1 sys_insn(1, 0, 8, 7, 3) +#define OP_TLBI_VALE1 sys_insn(1, 0, 8, 7, 5) +#define OP_TLBI_VAALE1 sys_insn(1, 0, 8, 7, 7) +#define OP_TLBI_VMALLE1OSNXS sys_insn(1, 0, 9, 1, 0) +#define OP_TLBI_VAE1OSNXS sys_insn(1, 0, 9, 1, 1) +#define OP_TLBI_ASIDE1OSNXS sys_insn(1, 0, 9, 1, 2) +#define OP_TLBI_VAAE1OSNXS sys_insn(1, 0, 9, 1, 3) +#define OP_TLBI_VALE1OSNXS sys_insn(1, 0, 9, 1, 5) +#define OP_TLBI_VAALE1OSNXS sys_insn(1, 
0, 9, 1, 7) +#define OP_TLBI_RVAE1ISNXS sys_insn(1, 0, 9, 2, 1) +#define OP_TLBI_RVAAE1ISNXS sys_insn(1, 0, 9, 2, 3) +#define OP_TLBI_RVALE1ISNXS sys_insn(1, 0, 9, 2, 5) +#define OP_TLBI_RVAALE1ISNXS sys_insn(1, 0, 9, 2, 7) +#define OP_TLBI_VMALLE1ISNXS sys_insn(1, 0, 9, 3, 0) +#define OP_TLBI_VAE1ISNXS sys_insn(1, 0, 9, 3, 1) +#define OP_TLBI_ASIDE1ISNXS sys_insn(1, 0, 9, 3, 2) +#define OP_TLBI_VAAE1ISNXS sys_insn(1, 0, 9, 3, 3) +#define OP_TLBI_VALE1ISNXS sys_insn(1, 0, 9, 3, 5) +#define OP_TLBI_VAALE1ISNXS sys_insn(1, 0, 9, 3, 7) +#define OP_TLBI_RVAE1OSNXS sys_insn(1, 0, 9, 5, 1) +#define OP_TLBI_RVAAE1OSNXS sys_insn(1, 0, 9, 5, 3) +#define OP_TLBI_RVALE1OSNXS sys_insn(1, 0, 9, 5, 5) +#define OP_TLBI_RVAALE1OSNXS sys_insn(1, 0, 9, 5, 7) +#define OP_TLBI_RVAE1NXS sys_insn(1, 0, 9, 6, 1) +#define OP_TLBI_RVAAE1NXS sys_insn(1, 0, 9, 6, 3) +#define OP_TLBI_RVALE1NXS sys_insn(1, 0, 9, 6, 5) +#define OP_TLBI_RVAALE1NXS sys_insn(1, 0, 9, 6, 7) +#define OP_TLBI_VMALLE1NXS sys_insn(1, 0, 9, 7, 0) +#define OP_TLBI_VAE1NXS sys_insn(1, 0, 9, 7, 1) +#define OP_TLBI_ASIDE1NXS sys_insn(1, 0, 9, 7, 2) +#define OP_TLBI_VAAE1NXS sys_insn(1, 0, 9, 7, 3) +#define OP_TLBI_VALE1NXS sys_insn(1, 0, 9, 7, 5) +#define OP_TLBI_VAALE1NXS sys_insn(1, 0, 9, 7, 7) +#define OP_TLBI_IPAS2E1IS sys_insn(1, 4, 8, 0, 1) +#define OP_TLBI_RIPAS2E1IS sys_insn(1, 4, 8, 0, 2) +#define OP_TLBI_IPAS2LE1IS sys_insn(1, 4, 8, 0, 5) +#define OP_TLBI_RIPAS2LE1IS sys_insn(1, 4, 8, 0, 6) +#define OP_TLBI_ALLE2OS sys_insn(1, 4, 8, 1, 0) +#define OP_TLBI_VAE2OS sys_insn(1, 4, 8, 1, 1) +#define OP_TLBI_ALLE1OS sys_insn(1, 4, 8, 1, 4) +#define OP_TLBI_VALE2OS sys_insn(1, 4, 8, 1, 5) +#define OP_TLBI_VMALLS12E1OS sys_insn(1, 4, 8, 1, 6) +#define OP_TLBI_RVAE2IS sys_insn(1, 4, 8, 2, 1) +#define OP_TLBI_RVALE2IS sys_insn(1, 4, 8, 2, 5) +#define OP_TLBI_ALLE2IS sys_insn(1, 4, 8, 3, 0) +#define OP_TLBI_VAE2IS sys_insn(1, 4, 8, 3, 1) +#define OP_TLBI_ALLE1IS sys_insn(1, 4, 8, 3, 4) +#define OP_TLBI_VALE2IS sys_insn(1, 4, 8, 3, 5) +#define OP_TLBI_VMALLS12E1IS sys_insn(1, 4, 8, 3, 6) +#define OP_TLBI_IPAS2E1OS sys_insn(1, 4, 8, 4, 0) +#define OP_TLBI_IPAS2E1 sys_insn(1, 4, 8, 4, 1) +#define OP_TLBI_RIPAS2E1 sys_insn(1, 4, 8, 4, 2) +#define OP_TLBI_RIPAS2E1OS sys_insn(1, 4, 8, 4, 3) +#define OP_TLBI_IPAS2LE1OS sys_insn(1, 4, 8, 4, 4) +#define OP_TLBI_IPAS2LE1 sys_insn(1, 4, 8, 4, 5) +#define OP_TLBI_RIPAS2LE1 sys_insn(1, 4, 8, 4, 6) +#define OP_TLBI_RIPAS2LE1OS sys_insn(1, 4, 8, 4, 7) +#define OP_TLBI_RVAE2OS sys_insn(1, 4, 8, 5, 1) +#define OP_TLBI_RVALE2OS sys_insn(1, 4, 8, 5, 5) +#define OP_TLBI_RVAE2 sys_insn(1, 4, 8, 6, 1) +#define OP_TLBI_RVALE2 sys_insn(1, 4, 8, 6, 5) +#define OP_TLBI_ALLE2 sys_insn(1, 4, 8, 7, 0) +#define OP_TLBI_VAE2 sys_insn(1, 4, 8, 7, 1) +#define OP_TLBI_ALLE1 sys_insn(1, 4, 8, 7, 4) +#define OP_TLBI_VALE2 sys_insn(1, 4, 8, 7, 5) +#define OP_TLBI_VMALLS12E1 sys_insn(1, 4, 8, 7, 6) +#define OP_TLBI_IPAS2E1ISNXS sys_insn(1, 4, 9, 0, 1) +#define OP_TLBI_RIPAS2E1ISNXS sys_insn(1, 4, 9, 0, 2) +#define OP_TLBI_IPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 5) +#define OP_TLBI_RIPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 6) +#define OP_TLBI_ALLE2OSNXS sys_insn(1, 4, 9, 1, 0) +#define OP_TLBI_VAE2OSNXS sys_insn(1, 4, 9, 1, 1) +#define OP_TLBI_ALLE1OSNXS sys_insn(1, 4, 9, 1, 4) +#define OP_TLBI_VALE2OSNXS sys_insn(1, 4, 9, 1, 5) +#define OP_TLBI_VMALLS12E1OSNXS sys_insn(1, 4, 9, 1, 6) +#define OP_TLBI_RVAE2ISNXS sys_insn(1, 4, 9, 2, 1) +#define OP_TLBI_RVALE2ISNXS sys_insn(1, 4, 9, 2, 5) +#define OP_TLBI_ALLE2ISNXS sys_insn(1, 4, 9, 3, 0) +#define 
OP_TLBI_VAE2ISNXS		sys_insn(1, 4, 9, 3, 1)
+#define OP_TLBI_ALLE1ISNXS		sys_insn(1, 4, 9, 3, 4)
+#define OP_TLBI_VALE2ISNXS		sys_insn(1, 4, 9, 3, 5)
+#define OP_TLBI_VMALLS12E1ISNXS		sys_insn(1, 4, 9, 3, 6)
+#define OP_TLBI_IPAS2E1OSNXS		sys_insn(1, 4, 9, 4, 0)
+#define OP_TLBI_IPAS2E1NXS		sys_insn(1, 4, 9, 4, 1)
+#define OP_TLBI_RIPAS2E1NXS		sys_insn(1, 4, 9, 4, 2)
+#define OP_TLBI_RIPAS2E1OSNXS		sys_insn(1, 4, 9, 4, 3)
+#define OP_TLBI_IPAS2LE1OSNXS		sys_insn(1, 4, 9, 4, 4)
+#define OP_TLBI_IPAS2LE1NXS		sys_insn(1, 4, 9, 4, 5)
+#define OP_TLBI_RIPAS2LE1NXS		sys_insn(1, 4, 9, 4, 6)
+#define OP_TLBI_RIPAS2LE1OSNXS		sys_insn(1, 4, 9, 4, 7)
+#define OP_TLBI_RVAE2OSNXS		sys_insn(1, 4, 9, 5, 1)
+#define OP_TLBI_RVALE2OSNXS		sys_insn(1, 4, 9, 5, 5)
+#define OP_TLBI_RVAE2NXS		sys_insn(1, 4, 9, 6, 1)
+#define OP_TLBI_RVALE2NXS		sys_insn(1, 4, 9, 6, 5)
+#define OP_TLBI_ALLE2NXS		sys_insn(1, 4, 9, 7, 0)
+#define OP_TLBI_VAE2NXS			sys_insn(1, 4, 9, 7, 1)
+#define OP_TLBI_ALLE1NXS		sys_insn(1, 4, 9, 7, 4)
+#define OP_TLBI_VALE2NXS		sys_insn(1, 4, 9, 7, 5)
+#define OP_TLBI_VMALLS12E1NXS		sys_insn(1, 4, 9, 7, 6)
+
 /* Common SCTLR_ELx flags. */
 #define SCTLR_ELx_ENTP2	(BIT(60))
 #define SCTLR_ELx_DSSBS	(BIT(44))
-- cgit

From 2b97411fef8ff9dafc862971f08382f780dc5357 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 15 Aug 2023 19:38:39 +0100
Subject: arm64: Add AT operation encodings

Add the encodings for the AT operations that are usable from NS.

Reviewed-by: Eric Auger
Acked-by: Catalin Marinas
Reviewed-by: Miguel Luis
Reviewed-by: Zenghui Yu
Reviewed-by: Jing Zhang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230815183903.2735724-6-maz@kernel.org
---
 arch/arm64/include/asm/sysreg.h | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 72e18480ce62..76289339b43b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -514,6 +514,23 @@
 
 #define SYS_SP_EL2			sys_reg(3, 6, 4, 1, 0)
 
+/* AT instructions */
+#define AT_Op0	1
+#define AT_CRn	7
+
+#define OP_AT_S1E1R	sys_insn(AT_Op0, 0, AT_CRn, 8, 0)
+#define OP_AT_S1E1W	sys_insn(AT_Op0, 0, AT_CRn, 8, 1)
+#define OP_AT_S1E0R	sys_insn(AT_Op0, 0, AT_CRn, 8, 2)
+#define OP_AT_S1E0W	sys_insn(AT_Op0, 0, AT_CRn, 8, 3)
+#define OP_AT_S1E1RP	sys_insn(AT_Op0, 0, AT_CRn, 9, 0)
+#define OP_AT_S1E1WP	sys_insn(AT_Op0, 0, AT_CRn, 9, 1)
+#define OP_AT_S1E2R	sys_insn(AT_Op0, 4, AT_CRn, 8, 0)
+#define OP_AT_S1E2W	sys_insn(AT_Op0, 4, AT_CRn, 8, 1)
+#define OP_AT_S12E1R	sys_insn(AT_Op0, 4, AT_CRn, 8, 4)
+#define OP_AT_S12E1W	sys_insn(AT_Op0, 4, AT_CRn, 8, 5)
+#define OP_AT_S12E0R	sys_insn(AT_Op0, 4, AT_CRn, 8, 6)
+#define OP_AT_S12E0W	sys_insn(AT_Op0, 4, AT_CRn, 8, 7)
+
 /* TLBI instructions */
 #define OP_TLBI_VMALLE1OS		sys_insn(1, 0, 8, 1, 0)
 #define OP_TLBI_VAE1OS			sys_insn(1, 0, 8, 1, 1)
-- cgit

From 57596c8f991c9aace47d75b31249b8ec36b3b899 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 15 Aug 2023 19:38:40 +0100
Subject: arm64: Add debug registers affected by HDFGxTR_EL2

The HDFGxTR_EL2 registers trap a (huge) set of debug- and
trace-related registers. Add their encodings (and only that, because
we really don't care about what these registers actually do at this
stage).
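
Several of these encodings are parameterized; a worked example from
the SYS_BRBINF_EL1() definition below, where n selects CRm and the
top bit of op2:

	/*
	 * SYS_BRBINF_EL1(n) == sys_reg(2, 1, 8, n & 15, ((n & 16) >> 2) | 0)
	 * so n = 17 gives sys_reg(2, 1, 8, 1, 4)
	 */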
Acked-by: Catalin Marinas Reviewed-by: Eric Auger Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-7-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 76 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 76289339b43b..bb5a0877a210 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -194,6 +194,82 @@ #define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0) #define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0) +#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0)) +#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0) +#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1)) +#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1) +#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2)) +#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2) +#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2) + +#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0) +#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1) +#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0) + +#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3) +#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3))) +#define SYS_TRCACVR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (0 | (m >> 3))) +#define SYS_TRCAUTHSTATUS sys_reg(2, 1, 7, 14, 6) +#define SYS_TRCAUXCTLR sys_reg(2, 1, 0, 6, 0) +#define SYS_TRCBBCTLR sys_reg(2, 1, 0, 15, 0) +#define SYS_TRCCCCTLR sys_reg(2, 1, 0, 14, 0) +#define SYS_TRCCIDCCTLR0 sys_reg(2, 1, 3, 0, 2) +#define SYS_TRCCIDCCTLR1 sys_reg(2, 1, 3, 1, 2) +#define SYS_TRCCIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 0) +#define SYS_TRCCLAIMCLR sys_reg(2, 1, 7, 9, 6) +#define SYS_TRCCLAIMSET sys_reg(2, 1, 7, 8, 6) +#define SYS_TRCCNTCTLR(m) sys_reg(2, 1, 0, (4 | (m & 3)), 5) +#define SYS_TRCCNTRLDVR(m) sys_reg(2, 1, 0, (0 | (m & 3)), 5) +#define SYS_TRCCNTVR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 5) +#define SYS_TRCCONFIGR sys_reg(2, 1, 0, 4, 0) +#define SYS_TRCDEVARCH sys_reg(2, 1, 7, 15, 6) +#define SYS_TRCDEVID sys_reg(2, 1, 7, 2, 7) +#define SYS_TRCEVENTCTL0R sys_reg(2, 1, 0, 8, 0) +#define SYS_TRCEVENTCTL1R sys_reg(2, 1, 0, 9, 0) +#define SYS_TRCEXTINSELR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 4) +#define SYS_TRCIDR0 sys_reg(2, 1, 0, 8, 7) +#define SYS_TRCIDR10 sys_reg(2, 1, 0, 2, 6) +#define SYS_TRCIDR11 sys_reg(2, 1, 0, 3, 6) +#define SYS_TRCIDR12 sys_reg(2, 1, 0, 4, 6) +#define SYS_TRCIDR13 sys_reg(2, 1, 0, 5, 6) +#define SYS_TRCIDR1 sys_reg(2, 1, 0, 9, 7) +#define SYS_TRCIDR2 sys_reg(2, 1, 0, 10, 7) +#define SYS_TRCIDR3 sys_reg(2, 1, 0, 11, 7) +#define SYS_TRCIDR4 sys_reg(2, 1, 0, 12, 7) +#define SYS_TRCIDR5 sys_reg(2, 1, 0, 13, 7) +#define SYS_TRCIDR6 sys_reg(2, 1, 0, 14, 7) +#define SYS_TRCIDR7 sys_reg(2, 1, 0, 15, 7) +#define SYS_TRCIDR8 sys_reg(2, 1, 0, 0, 6) +#define SYS_TRCIDR9 sys_reg(2, 1, 0, 1, 6) +#define SYS_TRCIMSPEC(m) sys_reg(2, 1, 0, (m & 7), 7) +#define SYS_TRCITEEDCR sys_reg(2, 1, 0, 2, 1) +#define SYS_TRCOSLSR sys_reg(2, 1, 1, 1, 4) +#define SYS_TRCPRGCTLR sys_reg(2, 1, 0, 1, 0) +#define SYS_TRCQCTLR sys_reg(2, 1, 0, 1, 1) +#define SYS_TRCRSCTLR(m) sys_reg(2, 1, 1, (m & 15), (0 | (m >> 4))) +#define SYS_TRCRSR sys_reg(2, 1, 0, 10, 0) +#define SYS_TRCSEQEVR(m) sys_reg(2, 1, 0, (m & 3), 4) +#define SYS_TRCSEQRSTEVR sys_reg(2, 1, 0, 6, 4) +#define SYS_TRCSEQSTR sys_reg(2, 1, 0, 7, 4) +#define SYS_TRCSSCCR(m) sys_reg(2, 1, 1, (m & 7), 2) +#define SYS_TRCSSCSR(m) sys_reg(2, 1, 1, (8 | (m & 7)), 2) 
+#define SYS_TRCSSPCICR(m) sys_reg(2, 1, 1, (m & 7), 3) +#define SYS_TRCSTALLCTLR sys_reg(2, 1, 0, 11, 0) +#define SYS_TRCSTATR sys_reg(2, 1, 0, 3, 0) +#define SYS_TRCSYNCPR sys_reg(2, 1, 0, 13, 0) +#define SYS_TRCTRACEIDR sys_reg(2, 1, 0, 0, 1) +#define SYS_TRCTSCTLR sys_reg(2, 1, 0, 12, 0) +#define SYS_TRCVICTLR sys_reg(2, 1, 0, 0, 2) +#define SYS_TRCVIIECTLR sys_reg(2, 1, 0, 1, 2) +#define SYS_TRCVIPCSSCTLR sys_reg(2, 1, 0, 3, 2) +#define SYS_TRCVISSCTLR sys_reg(2, 1, 0, 2, 2) +#define SYS_TRCVMIDCCTLR0 sys_reg(2, 1, 3, 2, 2) +#define SYS_TRCVMIDCCTLR1 sys_reg(2, 1, 3, 3, 2) +#define SYS_TRCVMIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 1) + +/* ETM */ +#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4) + #define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) -- cgit From 2b062ed483ebd625b6c6054b9d29d600bd755a86 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:41 +0100 Subject: arm64: Add missing BRB/CFP/DVP/CPP instructions HFGITR_EL2 traps a bunch of instructions for which we don't have encodings yet. Add them. Reviewed-by: Miguel Luis Reviewed-by: Eric Auger Acked-by: Catalin Marinas Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-8-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index bb5a0877a210..6d9d7ac4b31c 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -735,6 +735,13 @@ #define OP_TLBI_VALE2NXS sys_insn(1, 4, 9, 7, 5) #define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6) +/* Misc instructions */ +#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4) +#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5) +#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4) +#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5) +#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7) + /* Common SCTLR_ELx flags. */ #define SCTLR_ELx_ENTP2 (BIT(60)) #define SCTLR_ELx_DSSBS (BIT(44)) -- cgit From cc24f656f7cf834f384a43fc6fe68ec62730743d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:42 +0100 Subject: arm64: Add HDFGRTR_EL2 and HDFGWTR_EL2 layouts As we're about to implement full support for FEAT_FGT, add the full HDFGRTR_EL2 and HDFGWTR_EL2 layouts. 
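
For context, each Field/Res0 line below is consumed by the sysreg
generator, which emits the usual per-field constants; e.g. 'Field 57
PMUSERENR_EL0' in HDFGRTR_EL2 becomes something along the lines of
the following (a sketch of the generated output; the exact shape is
defined by gen-sysreg.awk):

	#define HDFGRTR_EL2_PMUSERENR_EL0_SHIFT	57
	#define HDFGRTR_EL2_PMUSERENR_EL0_MASK	GENMASK(57, 57)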
Reviewed-by: Mark Brown Acked-by: Catalin Marinas Reviewed-by: Miguel Luis Reviewed-by: Jing Zhang Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-9-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 2 - arch/arm64/tools/sysreg | 129 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6d9d7ac4b31c..043c677e9f04 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -495,8 +495,6 @@ #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) -#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) -#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 65866bf819c3..2517ef7c21cf 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2156,6 +2156,135 @@ Field 1 ICIALLU Field 0 ICIALLUIS EndSysreg +Sysreg HDFGRTR_EL2 3 4 3 1 4 +Field 63 PMBIDR_EL1 +Field 62 nPMSNEVFR_EL1 +Field 61 nBRBDATA +Field 60 nBRBCTL +Field 59 nBRBIDR +Field 58 PMCEIDn_EL0 +Field 57 PMUSERENR_EL0 +Field 56 TRBTRG_EL1 +Field 55 TRBSR_EL1 +Field 54 TRBPTR_EL1 +Field 53 TRBMAR_EL1 +Field 52 TRBLIMITR_EL1 +Field 51 TRBIDR_EL1 +Field 50 TRBBASER_EL1 +Res0 49 +Field 48 TRCVICTLR +Field 47 TRCSTATR +Field 46 TRCSSCSRn +Field 45 TRCSEQSTR +Field 44 TRCPRGCTLR +Field 43 TRCOSLSR +Res0 42 +Field 41 TRCIMSPECn +Field 40 TRCID +Res0 39:38 +Field 37 TRCCNTVRn +Field 36 TRCCLAIM +Field 35 TRCAUXCTLR +Field 34 TRCAUTHSTATUS +Field 33 TRC +Field 32 PMSLATFR_EL1 +Field 31 PMSIRR_EL1 +Field 30 PMSIDR_EL1 +Field 29 PMSICR_EL1 +Field 28 PMSFCR_EL1 +Field 27 PMSEVFR_EL1 +Field 26 PMSCR_EL1 +Field 25 PMBSR_EL1 +Field 24 PMBPTR_EL1 +Field 23 PMBLIMITR_EL1 +Field 22 PMMIR_EL1 +Res0 21:20 +Field 19 PMSELR_EL0 +Field 18 PMOVS +Field 17 PMINTEN +Field 16 PMCNTEN +Field 15 PMCCNTR_EL0 +Field 14 PMCCFILTR_EL0 +Field 13 PMEVTYPERn_EL0 +Field 12 PMEVCNTRn_EL0 +Field 11 OSDLR_EL1 +Field 10 OSECCR_EL1 +Field 9 OSLSR_EL1 +Res0 8 +Field 7 DBGPRCR_EL1 +Field 6 DBGAUTHSTATUS_EL1 +Field 5 DBGCLAIM +Field 4 MDSCR_EL1 +Field 3 DBGWVRn_EL1 +Field 2 DBGWCRn_EL1 +Field 1 DBGBVRn_EL1 +Field 0 DBGBCRn_EL1 +EndSysreg + +Sysreg HDFGWTR_EL2 3 4 3 1 5 +Res0 63 +Field 62 nPMSNEVFR_EL1 +Field 61 nBRBDATA +Field 60 nBRBCTL +Res0 59:58 +Field 57 PMUSERENR_EL0 +Field 56 TRBTRG_EL1 +Field 55 TRBSR_EL1 +Field 54 TRBPTR_EL1 +Field 53 TRBMAR_EL1 +Field 52 TRBLIMITR_EL1 +Res0 51 +Field 50 TRBBASER_EL1 +Field 49 TRFCR_EL1 +Field 48 TRCVICTLR +Res0 47 +Field 46 TRCSSCSRn +Field 45 TRCSEQSTR +Field 44 TRCPRGCTLR +Res0 43 +Field 42 TRCOSLAR +Field 41 TRCIMSPECn +Res0 40:38 +Field 37 TRCCNTVRn +Field 36 TRCCLAIM +Field 35 TRCAUXCTLR +Res0 34 +Field 33 TRC +Field 32 PMSLATFR_EL1 +Field 31 PMSIRR_EL1 +Res0 30 +Field 29 PMSICR_EL1 +Field 28 PMSFCR_EL1 +Field 27 PMSEVFR_EL1 +Field 26 PMSCR_EL1 +Field 25 PMBSR_EL1 +Field 24 PMBPTR_EL1 +Field 23 PMBLIMITR_EL1 +Res0 22 +Field 21 PMCR_EL0 +Field 20 PMSWINC_EL0 +Field 19 PMSELR_EL0 +Field 18 PMOVS +Field 17 PMINTEN +Field 16 PMCNTEN +Field 15 PMCCNTR_EL0 +Field 14 PMCCFILTR_EL0 +Field 13 PMEVTYPERn_EL0 +Field 12 PMEVCNTRn_EL0 +Field 11 OSDLR_EL1 +Field 10 OSECCR_EL1 +Res0 9 +Field 8 OSLAR_EL1 +Field 7 DBGPRCR_EL1 +Res0 6 +Field 5 DBGCLAIM +Field 4 MDSCR_EL1 +Field 3 DBGWVRn_EL1 +Field 2 DBGWCRn_EL1 
+Field	1	DBGBVRn_EL1
+Field	0	DBGBCRn_EL1
+EndSysreg
+
 Sysreg	ZCR_EL2	3	4	1	2	0
 Fields	ZCR_ELx
 EndSysreg
-- cgit

From b206a708cbfb352f2191089678ab595d24563011 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 15 Aug 2023 19:38:43 +0100
Subject: arm64: Add feature detection for fine grained traps

In order to allow us to have shared code for managing fine grained
traps for KVM guests, add it as a detected feature rather than relying
on it being a dependency of other features.

Acked-by: Catalin Marinas
Reviewed-by: Eric Auger
Signed-off-by: Mark Brown
[maz: converted to ARM64_CPUID_FIELDS()]
Link: https://lore.kernel.org/r/20230301-kvm-arm64-fgt-v4-1-1bf8d235ac1f@kernel.org
Reviewed-by: Zenghui Yu
Reviewed-by: Miguel Luis
Reviewed-by: Jing Zhang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230815183903.2735724-10-maz@kernel.org
---
 arch/arm64/kernel/cpufeature.c | 7 +++++++
 arch/arm64/tools/cpucaps       | 1 +
 2 files changed, 8 insertions(+)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f9d456fe132d..668e2872a086 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2627,6 +2627,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP)
 	},
+	{
+		.desc = "Fine Grained Traps",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.capability = ARM64_HAS_FGT,
+		.matches = has_cpuid_feature,
+		ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, IMP)
+	},
 #ifdef CONFIG_ARM64_SME
 	{
 		.desc = "Scalable Matrix Extension",
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index c80ed4f3cbce..c3f06fdef609 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -26,6 +26,7 @@ HAS_ECV
 HAS_ECV_CNTPOFF
 HAS_EPAN
 HAS_EVT
+HAS_FGT
 HAS_GENERIC_AUTH
 HAS_GENERIC_AUTH_ARCH_QARMA3
 HAS_GENERIC_AUTH_ARCH_QARMA5
-- cgit

From 484f86824a3d94c6d9412618dd70b1d5923fff6f Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 15 Aug 2023 19:38:44 +0100
Subject: KVM: arm64: Correctly handle ACCDATA_EL1 traps

As we blindly reset some HFGxTR_EL2 bits to 0, we also randomly trap
unsuspecting sysregs whose trap bits have a negative polarity.

ACCDATA_EL1 is one such register that can be accessed by the guest,
causing a splat on the host as we don't have a proper handler for
it.

Adding such a handler addresses the issue, though there are a number
of other registers missing as the current architecture documentation
doesn't describe them yet.
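
For reference, the undef_access() helper wired up below is the stock
sys_regs.c handler (it predates this patch; reproduced here for
context) and simply injects an UNDEF into the guest:

	static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
				 const struct sys_reg_desc *r)
	{
		kvm_inject_undefined(vcpu);
		return false;
	}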
Reviewed-by: Eric Auger
Reviewed-by: Miguel Luis
Reviewed-by: Jing Zhang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230815183903.2735724-11-maz@kernel.org
---
 arch/arm64/include/asm/sysreg.h | 2 ++
 arch/arm64/kvm/sys_regs.c       | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 043c677e9f04..818c111009ca 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -387,6 +387,8 @@
 #define SYS_ICC_IGRPEN0_EL1		sys_reg(3, 0, 12, 12, 6)
 #define SYS_ICC_IGRPEN1_EL1		sys_reg(3, 0, 12, 12, 7)
 
+#define SYS_ACCDATA_EL1			sys_reg(3, 0, 13, 0, 5)
+
 #define SYS_CNTKCTL_EL1			sys_reg(3, 0, 14, 1, 0)
 
 #define SYS_AIDR_EL1			sys_reg(3, 1, 0, 0, 7)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2ca2973abe66..38f221f9fc98 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2151,6 +2151,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
 
+	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },
+
 	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
 
 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
-- cgit

From 3ea84b4fe446319625be64945793b8540ca15f84 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 15 Aug 2023 19:38:45 +0100
Subject: KVM: arm64: Add missing HCR_EL2 trap bits

We're still missing a handful of HCR_EL2 trap bits. Add them.

Reviewed-by: Eric Auger
Reviewed-by: Miguel Luis
Reviewed-by: Jing Zhang
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230815183903.2735724-12-maz@kernel.org
---
 arch/arm64/include/asm/kvm_arm.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 58e5eb27da68..028049b147df 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,10 +18,19 @@
 #define HCR_DCT		(UL(1) << 57)
 #define HCR_ATA_SHIFT	56
 #define HCR_ATA		(UL(1) << HCR_ATA_SHIFT)
+#define HCR_TTLBOS	(UL(1) << 55)
+#define HCR_TTLBIS	(UL(1) << 54)
+#define HCR_ENSCXT	(UL(1) << 53)
+#define HCR_TOCU	(UL(1) << 52)
 #define HCR_AMVOFFEN	(UL(1) << 51)
+#define HCR_TICAB	(UL(1) << 50)
 #define HCR_TID4	(UL(1) << 49)
 #define HCR_FIEN	(UL(1) << 47)
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_NV2		(UL(1) << 45)
+#define HCR_AT		(UL(1) << 44)
+#define HCR_NV1		(UL(1) << 43)
+#define HCR_NV		(UL(1) << 42)
 #define HCR_API		(UL(1) << 41)
 #define HCR_APK		(UL(1) << 40)
 #define HCR_TEA		(UL(1) << 37)
-- cgit

From 50d2fe4648c50e7d33fa576f6b078f22ad973670 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 15 Aug 2023 19:38:46 +0100
Subject: KVM: arm64: nv: Add FGT registers

Add the 5 registers covering FEAT_FGT. The AMU-related registers are
currently left out as we don't have a plan for them. Yet.
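
With the enum slots in place, the guest hypervisor's programming of
these registers can be consulted like any other shadow EL2 state; for
instance (a sketch, using the generated HFGxTR_EL2 mask and
remembering the negative polarity of the nXXX bits):

	/* Sketch: did L1 leave reads of ACCDATA_EL1 trapped? */
	if (!(__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_nACCDATA_EL1))
		; /* forward the L2 access to the L1 hypervisor */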
Reviewed-by: Eric Auger Reviewed-by: Miguel Luis Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-13-maz@kernel.org --- arch/arm64/include/asm/kvm_host.h | 5 +++++ arch/arm64/kvm/sys_regs.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d3dd05bbfe23..721680da1011 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -400,6 +400,11 @@ enum vcpu_sysreg { TPIDR_EL2, /* EL2 Software Thread ID Register */ CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */ SP_EL2, /* EL2 Stack Pointer */ + HFGRTR_EL2, + HFGWTR_EL2, + HFGITR_EL2, + HDFGRTR_EL2, + HDFGWTR_EL2, CNTHP_CTL_EL2, CNTHP_CVAL_EL2, CNTHV_CTL_EL2, diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 38f221f9fc98..f5baaa508926 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2367,6 +2367,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(MDCR_EL2, access_rw, reset_val, 0), EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), EL2_REG(HSTR_EL2, access_rw, reset_val, 0), + EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0), + EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0), + EL2_REG(HFGITR_EL2, access_rw, reset_val, 0), EL2_REG(HACR_EL2, access_rw, reset_val, 0), EL2_REG(TTBR0_EL2, access_rw, reset_val, 0), @@ -2376,6 +2379,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(VTCR_EL2, access_rw, reset_val, 0), { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, + EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0), + EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0), EL2_REG(SPSR_EL2, access_rw, reset_val, 0), EL2_REG(ELR_EL2, access_rw, reset_val, 0), { SYS_DESC(SYS_SP_EL1), access_sp_el1}, -- cgit From e930694e6145eb210c9931914a7801cc61016a82 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:47 +0100 Subject: KVM: arm64: Restructure FGT register switching As we're about to majorly extend the handling of FGT registers, restructure the code to actually save/restore the registers as required. This is made easy thanks to the previous addition of the EL2 registers, allowing us to use the host context for this purpose. Reviewed-by: Eric Auger Reviewed-by: Oliver Upton Reviewed-by: Miguel Luis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-14-maz@kernel.org --- arch/arm64/include/asm/kvm_arm.h | 21 +++++++++++++ arch/arm64/kvm/hyp/include/hyp/switch.h | 56 +++++++++++++++++---------------- 2 files changed, 50 insertions(+), 27 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 028049b147df..85908aa18908 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -333,6 +333,27 @@ BIT(18) | \ GENMASK(16, 15)) +/* + * FGT register definitions + * + * RES0 and polarity masks as of DDI0487J.a, to be updated as needed. + * We're not using the generated masks as they are usually ahead of + * the published ARM ARM, which we use as a reference. + * + * Once we get to a point where the two describe the same thing, we'll + * merge the definitions. One day. 
+ */ +#define __HFGRTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51)) +#define __HFGRTR_EL2_MASK GENMASK(49, 0) +#define __HFGRTR_EL2_nMASK (GENMASK(55, 54) | BIT(50)) + +#define __HFGWTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51) | \ + BIT(46) | BIT(42) | BIT(40) | BIT(28) | \ + GENMASK(26, 25) | BIT(21) | BIT(18) | \ + GENMASK(15, 14) | GENMASK(10, 9) | BIT(2)) +#define __HFGWTR_EL2_MASK GENMASK(49, 0) +#define __HFGWTR_EL2_nMASK (GENMASK(55, 54) | BIT(50)) + /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) /* diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 4bddb8541bec..e096b16e85fd 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -70,20 +70,19 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) } } -static inline bool __hfgxtr_traps_required(void) -{ - if (cpus_have_final_cap(ARM64_SME)) - return true; - - if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) - return true; - return false; -} -static inline void __activate_traps_hfgxtr(void) +static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) { + struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; + u64 r_val, w_val; + + if (!cpus_have_final_cap(ARM64_HAS_FGT)) + return; + + ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2); + ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2); if (cpus_have_final_cap(ARM64_SME)) { tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; @@ -98,26 +97,31 @@ static inline void __activate_traps_hfgxtr(void) if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) w_set |= HFGxTR_EL2_TCR_EL1_MASK; - sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); - sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); + + /* The default is not to trap anything but ACCDATA_EL1 */ + r_val = __HFGRTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1; + r_val |= r_set; + r_val &= ~r_clr; + + w_val = __HFGWTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1; + w_val |= w_set; + w_val &= ~w_clr; + + write_sysreg_s(r_val, SYS_HFGRTR_EL2); + write_sysreg_s(w_val, SYS_HFGWTR_EL2); } -static inline void __deactivate_traps_hfgxtr(void) +static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) { - u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; + struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; - if (cpus_have_final_cap(ARM64_SME)) { - tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; + if (!cpus_have_final_cap(ARM64_HAS_FGT)) + return; - r_set |= tmp; - w_set |= tmp; - } + write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2); + write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2); - if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) - w_clr |= HFGxTR_EL2_TCR_EL1_MASK; - sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); - sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); } static inline void __activate_traps_common(struct kvm_vcpu *vcpu) @@ -145,8 +149,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); - if (__hfgxtr_traps_required()) - __activate_traps_hfgxtr(); + __activate_traps_hfgxtr(vcpu); } static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) @@ -162,8 +165,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) 
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
 	}
 
-	if (__hfgxtr_traps_required())
-		__deactivate_traps_hfgxtr();
+	__deactivate_traps_hfgxtr(vcpu);
 }
 
 static inline void ___activate_traps(struct kvm_vcpu *vcpu)
-- cgit

From e58ec47bf68d2bcaaa97d80cc13aca4bc4abe07b Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 15 Aug 2023 19:38:48 +0100
Subject: KVM: arm64: nv: Add trap forwarding infrastructure

A significant part of what an NV hypervisor needs to do is to decide
whether a trap from an L2+ guest has to be forwarded to an L1 guest
or handled locally. This is done by checking for the trap bits that
the guest hypervisor has set and acting accordingly, as described by
the architecture.

A previous approach was to sprinkle a bunch of checks in all the
system register accessors, but this is pretty error-prone and doesn't
help in getting an overview of what is happening.

Instead, implement a set of global tables that describe a trap bit,
combinations of trap bits, behaviours on trap, and what bits must be
evaluated on a system register trap. Although this is painful to
describe, this allows us to specify each and every control bit in a
static manner. To make it efficient, the table is inserted in an
xarray that is global to the system, and checked each time we trap a
system register while running an L2 guest.

Add the basic infrastructure for now, while additional patches will
implement configuration registers.

Signed-off-by: Marc Zyngier
Reviewed-by: Jing Zhang
Reviewed-by: Miguel Luis
Link: https://lore.kernel.org/r/20230815183903.2735724-15-maz@kernel.org
---
 arch/arm64/include/asm/kvm_host.h   |   1 +
 arch/arm64/include/asm/kvm_nested.h |   2 +
 arch/arm64/kvm/emulate-nested.c     | 282 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c           |   6 +
 arch/arm64/kvm/trace_arm.h          |  26 ++++
 5 files changed, 317 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 721680da1011..cb1c5c54cedd 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -988,6 +988,7 @@ int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
 
 int __init kvm_sys_reg_table_init(void);
+int __init populate_nv_trap_config(void);
 
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 8fb67f032fd1..fa23cc9c2adc 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -11,6 +11,8 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 		test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
 }
 
+extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
+
 struct sys_reg_params;
 struct sys_reg_desc;
 
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index b96662029fb1..d5837ed0077c 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -14,6 +14,288 @@
 
 #include "trace.h"
 
+enum trap_behaviour {
+	BEHAVE_HANDLE_LOCALLY	= 0,
+	BEHAVE_FORWARD_READ	= BIT(0),
+	BEHAVE_FORWARD_WRITE	= BIT(1),
+	BEHAVE_FORWARD_ANY	= BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+};
+
+struct trap_bits {
+	const enum vcpu_sysreg		index;
+	const enum trap_behaviour	behaviour;
+	const u64			value;
+	const u64			mask;
+};
+
+/* Coarse Grained Trap definitions */
+enum cgt_group_id {
+	/* Indicates no coarse trap control */
+	__RESERVED__,
+
+	/*
+	 * The first batch of IDs denote coarse trapping that are used
+	 * on their own instead of being part of a
combination of
+	 * trap controls.
+	 */
+
+	/*
+	 * Anything after this point is a combination of coarse trap
+	 * controls, which must all be evaluated to decide what to do.
+	 */
+	__MULTIPLE_CONTROL_BITS__,
+
+	/*
+	 * Anything after this point requires a callback evaluating a
+	 * complex trap condition. Hopefully we'll never need this...
+	 */
+	__COMPLEX_CONDITIONS__,
+
+	/* Must be last */
+	__NR_CGT_GROUP_IDS__
+};
+
+static const struct trap_bits coarse_trap_bits[] = {
+};
+
+#define MCB(id, ...)						\
+	[id - __MULTIPLE_CONTROL_BITS__]	=		\
+		(const enum cgt_group_id[]){			\
+			__VA_ARGS__, __RESERVED__		\
+		}
+
+static const enum cgt_group_id *coarse_control_combo[] = {
+};
+
+typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *);
+
+#define CCC(id, fn)				\
+	[id - __COMPLEX_CONDITIONS__] = fn
+
+static const complex_condition_check ccc[] = {
+};
+
+/*
+ * Bit assignment for the trap controls. We use a 64bit word with the
+ * following layout for each trapped sysreg:
+ *
+ * [9:0]	enum cgt_group_id (10 bits)
+ * [62:10]	Unused (53 bits)
+ * [63]		RES0 - Must be zero, as lost on insertion in the xarray
+ */
+#define TC_CGT_BITS	10
+
+union trap_config {
+	u64	val;
+	struct {
+		unsigned long	cgt:TC_CGT_BITS; /* Coarse Grained Trap id */
+		unsigned long	unused:53;	 /* Unused, should be zero */
+		unsigned long	mbz:1;		 /* Must Be Zero */
+	};
+};
+
+struct encoding_to_trap_config {
+	const u32			encoding;
+	const u32			end;
+	const union trap_config		tc;
+	const unsigned int		line;
+};
+
+#define SR_RANGE_TRAP(sr_start, sr_end, trap_id)			\
+	{								\
+		.encoding	= sr_start,				\
+		.end		= sr_end,				\
+		.tc		= {					\
+			.cgt		= trap_id,			\
+		},							\
+		.line = __LINE__,					\
+	}
+
+#define SR_TRAP(sr, trap_id)		SR_RANGE_TRAP(sr, sr, trap_id)
+
+/*
+ * Map encoding to trap bits for exception reported with EC=0x18.
+ * These must only be evaluated when running a nested hypervisor, and
+ * only when the current context is not a hypervisor context. When
+ * the trapped access matches one of the trap controls, the exception
+ * is re-injected in the nested hypervisor.
+ */ +static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { +}; + +static DEFINE_XARRAY(sr_forward_xa); + +static union trap_config get_trap_config(u32 sysreg) +{ + return (union trap_config) { + .val = xa_to_value(xa_load(&sr_forward_xa, sysreg)), + }; +} + +static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc, + const char *type, int err) +{ + kvm_err("%s line %d encoding range " + "(%d, %d, %d, %d, %d) - (%d, %d, %d, %d, %d) (err=%d)\n", + type, tc->line, + sys_reg_Op0(tc->encoding), sys_reg_Op1(tc->encoding), + sys_reg_CRn(tc->encoding), sys_reg_CRm(tc->encoding), + sys_reg_Op2(tc->encoding), + sys_reg_Op0(tc->end), sys_reg_Op1(tc->end), + sys_reg_CRn(tc->end), sys_reg_CRm(tc->end), + sys_reg_Op2(tc->end), + err); +} + +int __init populate_nv_trap_config(void) +{ + int ret = 0; + + BUILD_BUG_ON(sizeof(union trap_config) != sizeof(void *)); + BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS)); + + for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) { + const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i]; + void *prev; + + if (cgt->tc.val & BIT(63)) { + kvm_err("CGT[%d] has MBZ bit set\n", i); + ret = -EINVAL; + } + + if (cgt->encoding != cgt->end) { + prev = xa_store_range(&sr_forward_xa, + cgt->encoding, cgt->end, + xa_mk_value(cgt->tc.val), + GFP_KERNEL); + } else { + prev = xa_store(&sr_forward_xa, cgt->encoding, + xa_mk_value(cgt->tc.val), GFP_KERNEL); + if (prev && !xa_is_err(prev)) { + ret = -EINVAL; + print_nv_trap_error(cgt, "Duplicate CGT", ret); + } + } + + if (xa_is_err(prev)) { + ret = xa_err(prev); + print_nv_trap_error(cgt, "Failed CGT insertion", ret); + } + } + + kvm_info("nv: %ld coarse grained trap handlers\n", + ARRAY_SIZE(encoding_to_cgt)); + + for (int id = __MULTIPLE_CONTROL_BITS__; id < __COMPLEX_CONDITIONS__; id++) { + const enum cgt_group_id *cgids; + + cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__]; + + for (int i = 0; cgids[i] != __RESERVED__; i++) { + if (cgids[i] >= __MULTIPLE_CONTROL_BITS__) { + kvm_err("Recursive MCB %d/%d\n", id, cgids[i]); + ret = -EINVAL; + } + } + } + + if (ret) + xa_destroy(&sr_forward_xa); + + return ret; +} + +static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu, + const struct trap_bits *tb) +{ + enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY; + u64 val; + + val = __vcpu_sys_reg(vcpu, tb->index); + if ((val & tb->mask) == tb->value) + b |= tb->behaviour; + + return b; +} + +static enum trap_behaviour __compute_trap_behaviour(struct kvm_vcpu *vcpu, + const enum cgt_group_id id, + enum trap_behaviour b) +{ + switch (id) { + const enum cgt_group_id *cgids; + + case __RESERVED__ ... __MULTIPLE_CONTROL_BITS__ - 1: + if (likely(id != __RESERVED__)) + b |= get_behaviour(vcpu, &coarse_trap_bits[id]); + break; + case __MULTIPLE_CONTROL_BITS__ ... __COMPLEX_CONDITIONS__ - 1: + /* Yes, this is recursive. Don't do anything stupid. 
*/ + cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__]; + for (int i = 0; cgids[i] != __RESERVED__; i++) + b |= __compute_trap_behaviour(vcpu, cgids[i], b); + break; + default: + if (ARRAY_SIZE(ccc)) + b |= ccc[id - __COMPLEX_CONDITIONS__](vcpu); + break; + } + + return b; +} + +static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu, + const union trap_config tc) +{ + enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY; + + return __compute_trap_behaviour(vcpu, tc.cgt, b); +} + +bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) +{ + union trap_config tc; + enum trap_behaviour b; + bool is_read; + u32 sysreg; + u64 esr; + + if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + return false; + + esr = kvm_vcpu_get_esr(vcpu); + sysreg = esr_sys64_to_sysreg(esr); + is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ; + + tc = get_trap_config(sysreg); + + /* + * A value of 0 for the whole entry means that we know nothing + * for this sysreg, and that it cannot be re-injected into the + * nested hypervisor. In this situation, let's cut it short. + * + * Note that ultimately, we could also make use of the xarray + * to store the index of the sysreg in the local descriptor + * array, avoiding another search... Hint, hint... + */ + if (!tc.val) + return false; + + b = compute_trap_behaviour(vcpu, tc); + + if (((b & BEHAVE_FORWARD_READ) && is_read) || + ((b & BEHAVE_FORWARD_WRITE) && !is_read)) + goto inject; + + return false; + +inject: + trace_kvm_forward_sysreg_trap(vcpu, sysreg, is_read); + + kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu)); + return true; +} + static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr) { u64 mode = spsr & PSR_MODE_MASK; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index f5baaa508926..9556896311db 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -3177,6 +3177,9 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu) trace_kvm_handle_sys_reg(esr); + if (__check_nv_sr_forward(vcpu)) + return 1; + params = esr_sys64_to_params(esr); params.regval = vcpu_get_reg(vcpu, Rt); @@ -3594,5 +3597,8 @@ int __init kvm_sys_reg_table_init(void) if (!first_idreg) return -EINVAL; + if (kvm_get_mode() == KVM_MODE_NV) + return populate_nv_trap_config(); + return 0; } diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index 6ce5c025218d..8ad53104934d 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -364,6 +364,32 @@ TRACE_EVENT(kvm_inject_nested_exception, __entry->hcr_el2) ); +TRACE_EVENT(kvm_forward_sysreg_trap, + TP_PROTO(struct kvm_vcpu *vcpu, u32 sysreg, bool is_read), + TP_ARGS(vcpu, sysreg, is_read), + + TP_STRUCT__entry( + __field(u64, pc) + __field(u32, sysreg) + __field(bool, is_read) + ), + + TP_fast_assign( + __entry->pc = *vcpu_pc(vcpu); + __entry->sysreg = sysreg; + __entry->is_read = is_read; + ), + + TP_printk("%llx %c (%d,%d,%d,%d,%d)", + __entry->pc, + __entry->is_read ? 'R' : 'W', + sys_reg_Op0(__entry->sysreg), + sys_reg_Op1(__entry->sysreg), + sys_reg_CRn(__entry->sysreg), + sys_reg_CRm(__entry->sysreg), + sys_reg_Op2(__entry->sysreg)) +); + #endif /* _TRACE_ARM_ARM64_KVM_H */ #undef TRACE_INCLUDE_PATH -- cgit From d0fc0a2519a6dd906aac448e742958d30b5787ac Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:49 +0100 Subject: KVM: arm64: nv: Add trap forwarding for HCR_EL2 Describe the HCR_EL2 register, and associate it with all the sysregs it allows to trap. 
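As a rough illustration of how these descriptions work (a self-contained userspace sketch, not kernel code: HCR_EL2.TSW really is bit 22, but everything else here is simplified), each coarse_trap_bits entry boils down to a mask/value test on the guest hypervisor's view of HCR_EL2:

#include <stdint.h>
#include <stdio.h>

#define HCR_TSW (UINT64_C(1) << 22)	/* HCR_EL2.TSW: trap DC *SW ops */

enum trap_behaviour {
	BEHAVE_HANDLE_LOCALLY = 0,
	BEHAVE_FORWARD_READ   = 1 << 0,
	BEHAVE_FORWARD_WRITE  = 1 << 1,
	BEHAVE_FORWARD_ANY    = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
};

struct trap_bits {
	uint64_t value;				/* expected bits... */
	uint64_t mask;				/* ...under this mask */
	enum trap_behaviour behaviour;
};

/* Shaped like the CGT_HCR_TSW entry: forward DC *SW when TSW is set.
 * Negative controls (e.g. HCR_EL2.APK) use value == 0 with a non-zero
 * mask, so "trap when the bit is clear" needs no special casing. */
static const struct trap_bits tsw_bits = {
	.value	   = HCR_TSW,
	.mask	   = HCR_TSW,
	.behaviour = BEHAVE_FORWARD_ANY,
};

static enum trap_behaviour get_behaviour(uint64_t hcr,
					 const struct trap_bits *tb)
{
	return (hcr & tb->mask) == tb->value ? tb->behaviour
					     : BEHAVE_HANDLE_LOCALLY;
}

int main(void)
{
	printf("TSW set:   %d\n", get_behaviour(HCR_TSW, &tsw_bits)); /* 3: forward */
	printf("TSW clear: %d\n", get_behaviour(0, &tsw_bits));       /* 0: local   */
	return 0;
}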
Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-16-maz@kernel.org --- arch/arm64/kvm/emulate-nested.c | 488 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 488 insertions(+) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index d5837ed0077c..975a30ef874a 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -38,12 +38,48 @@ enum cgt_group_id { * on their own instead of being part of a combination of * trap controls. */ + CGT_HCR_TID1, + CGT_HCR_TID2, + CGT_HCR_TID3, + CGT_HCR_IMO, + CGT_HCR_FMO, + CGT_HCR_TIDCP, + CGT_HCR_TACR, + CGT_HCR_TSW, + CGT_HCR_TPC, + CGT_HCR_TPU, + CGT_HCR_TTLB, + CGT_HCR_TVM, + CGT_HCR_TDZ, + CGT_HCR_TRVM, + CGT_HCR_TLOR, + CGT_HCR_TERR, + CGT_HCR_APK, + CGT_HCR_NV, + CGT_HCR_NV_nNV2, + CGT_HCR_NV1_nNV2, + CGT_HCR_AT, + CGT_HCR_nFIEN, + CGT_HCR_TID4, + CGT_HCR_TICAB, + CGT_HCR_TOCU, + CGT_HCR_ENSCXT, + CGT_HCR_TTLBIS, + CGT_HCR_TTLBOS, /* * Anything after this point is a combination of coarse trap * controls, which must all be evaluated to decide what to do. */ __MULTIPLE_CONTROL_BITS__, + CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__, + CGT_HCR_TID2_TID4, + CGT_HCR_TTLB_TTLBIS, + CGT_HCR_TTLB_TTLBOS, + CGT_HCR_TVM_TRVM, + CGT_HCR_TPU_TICAB, + CGT_HCR_TPU_TOCU, + CGT_HCR_NV1_nNV2_ENSCXT, /* * Anything after this point requires a callback evaluating a @@ -56,6 +92,174 @@ enum cgt_group_id { }; static const struct trap_bits coarse_trap_bits[] = { + [CGT_HCR_TID1] = { + .index = HCR_EL2, + .value = HCR_TID1, + .mask = HCR_TID1, + .behaviour = BEHAVE_FORWARD_READ, + }, + [CGT_HCR_TID2] = { + .index = HCR_EL2, + .value = HCR_TID2, + .mask = HCR_TID2, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TID3] = { + .index = HCR_EL2, + .value = HCR_TID3, + .mask = HCR_TID3, + .behaviour = BEHAVE_FORWARD_READ, + }, + [CGT_HCR_IMO] = { + .index = HCR_EL2, + .value = HCR_IMO, + .mask = HCR_IMO, + .behaviour = BEHAVE_FORWARD_WRITE, + }, + [CGT_HCR_FMO] = { + .index = HCR_EL2, + .value = HCR_FMO, + .mask = HCR_FMO, + .behaviour = BEHAVE_FORWARD_WRITE, + }, + [CGT_HCR_TIDCP] = { + .index = HCR_EL2, + .value = HCR_TIDCP, + .mask = HCR_TIDCP, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TACR] = { + .index = HCR_EL2, + .value = HCR_TACR, + .mask = HCR_TACR, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TSW] = { + .index = HCR_EL2, + .value = HCR_TSW, + .mask = HCR_TSW, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TPC] = { /* Also called TCPC when FEAT_DPB is implemented */ + .index = HCR_EL2, + .value = HCR_TPC, + .mask = HCR_TPC, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TPU] = { + .index = HCR_EL2, + .value = HCR_TPU, + .mask = HCR_TPU, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TTLB] = { + .index = HCR_EL2, + .value = HCR_TTLB, + .mask = HCR_TTLB, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TVM] = { + .index = HCR_EL2, + .value = HCR_TVM, + .mask = HCR_TVM, + .behaviour = BEHAVE_FORWARD_WRITE, + }, + [CGT_HCR_TDZ] = { + .index = HCR_EL2, + .value = HCR_TDZ, + .mask = HCR_TDZ, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TRVM] = { + .index = HCR_EL2, + .value = HCR_TRVM, + .mask = HCR_TRVM, + .behaviour = BEHAVE_FORWARD_READ, + }, + [CGT_HCR_TLOR] = { + .index = HCR_EL2, + .value = HCR_TLOR, + .mask = HCR_TLOR, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TERR] = { + .index = HCR_EL2, + .value = HCR_TERR, + .mask = HCR_TERR, + .behaviour = BEHAVE_FORWARD_ANY, + }, + 
[CGT_HCR_APK] = { + .index = HCR_EL2, + .value = 0, + .mask = HCR_APK, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_NV] = { + .index = HCR_EL2, + .value = HCR_NV, + .mask = HCR_NV, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_NV_nNV2] = { + .index = HCR_EL2, + .value = HCR_NV, + .mask = HCR_NV | HCR_NV2, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_NV1_nNV2] = { + .index = HCR_EL2, + .value = HCR_NV | HCR_NV1, + .mask = HCR_NV | HCR_NV1 | HCR_NV2, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_AT] = { + .index = HCR_EL2, + .value = HCR_AT, + .mask = HCR_AT, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_nFIEN] = { + .index = HCR_EL2, + .value = 0, + .mask = HCR_FIEN, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TID4] = { + .index = HCR_EL2, + .value = HCR_TID4, + .mask = HCR_TID4, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TICAB] = { + .index = HCR_EL2, + .value = HCR_TICAB, + .mask = HCR_TICAB, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TOCU] = { + .index = HCR_EL2, + .value = HCR_TOCU, + .mask = HCR_TOCU, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_ENSCXT] = { + .index = HCR_EL2, + .value = 0, + .mask = HCR_ENSCXT, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TTLBIS] = { + .index = HCR_EL2, + .value = HCR_TTLBIS, + .mask = HCR_TTLBIS, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_HCR_TTLBOS] = { + .index = HCR_EL2, + .value = HCR_TTLBOS, + .mask = HCR_TTLBOS, + .behaviour = BEHAVE_FORWARD_ANY, + }, }; #define MCB(id, ...) \ @@ -65,6 +269,14 @@ static const struct trap_bits coarse_trap_bits[] = { } static const enum cgt_group_id *coarse_control_combo[] = { + MCB(CGT_HCR_IMO_FMO, CGT_HCR_IMO, CGT_HCR_FMO), + MCB(CGT_HCR_TID2_TID4, CGT_HCR_TID2, CGT_HCR_TID4), + MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS), + MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS), + MCB(CGT_HCR_TVM_TRVM, CGT_HCR_TVM, CGT_HCR_TRVM), + MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB), + MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU), + MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT), }; typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *); @@ -121,6 +333,282 @@ struct encoding_to_trap_config { * re-injected in the nested hypervisor. 
*/ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { + SR_TRAP(SYS_REVIDR_EL1, CGT_HCR_TID1), + SR_TRAP(SYS_AIDR_EL1, CGT_HCR_TID1), + SR_TRAP(SYS_SMIDR_EL1, CGT_HCR_TID1), + SR_TRAP(SYS_CTR_EL0, CGT_HCR_TID2), + SR_TRAP(SYS_CCSIDR_EL1, CGT_HCR_TID2_TID4), + SR_TRAP(SYS_CCSIDR2_EL1, CGT_HCR_TID2_TID4), + SR_TRAP(SYS_CLIDR_EL1, CGT_HCR_TID2_TID4), + SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4), + SR_RANGE_TRAP(SYS_ID_PFR0_EL1, + sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3), + SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO), + SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO), + SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO), + SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0), + sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0), + sys_reg(3, 1, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 2, 11, 0, 0), + sys_reg(3, 2, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 3, 11, 0, 0), + sys_reg(3, 3, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 4, 11, 0, 0), + sys_reg(3, 4, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 5, 11, 0, 0), + sys_reg(3, 5, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 6, 11, 0, 0), + sys_reg(3, 6, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 7, 11, 0, 0), + sys_reg(3, 7, 11, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 0, 15, 0, 0), + sys_reg(3, 0, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 1, 15, 0, 0), + sys_reg(3, 1, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 2, 15, 0, 0), + sys_reg(3, 2, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 3, 15, 0, 0), + sys_reg(3, 3, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 4, 15, 0, 0), + sys_reg(3, 4, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 5, 15, 0, 0), + sys_reg(3, 5, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 6, 15, 0, 0), + sys_reg(3, 6, 15, 15, 7), CGT_HCR_TIDCP), + SR_RANGE_TRAP(sys_reg(3, 7, 15, 0, 0), + sys_reg(3, 7, 15, 15, 7), CGT_HCR_TIDCP), + SR_TRAP(SYS_ACTLR_EL1, CGT_HCR_TACR), + SR_TRAP(SYS_DC_ISW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CISW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_IGSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_IGDSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CGSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CGDSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CIGSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CIGDSW, CGT_HCR_TSW), + SR_TRAP(SYS_DC_CIVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CVAP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CVADP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_IVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CIGVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CIGDVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_IGVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_IGDVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGDVAC, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGVAP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGDVAP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGVADP, CGT_HCR_TPC), + SR_TRAP(SYS_DC_CGDVADP, CGT_HCR_TPC), + SR_TRAP(SYS_IC_IVAU, CGT_HCR_TPU_TOCU), + SR_TRAP(SYS_IC_IALLU, CGT_HCR_TPU_TOCU), + SR_TRAP(SYS_IC_IALLUIS, CGT_HCR_TPU_TICAB), + SR_TRAP(SYS_DC_CVAU, CGT_HCR_TPU_TOCU), + SR_TRAP(OP_TLBI_RVAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVALE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAALE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VMALLE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_ASIDE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAAE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VALE1, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAALE1, CGT_HCR_TTLB), + 
SR_TRAP(OP_TLBI_RVAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VMALLE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_ASIDE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAAE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_VAALE1NXS, CGT_HCR_TTLB), + SR_TRAP(OP_TLBI_RVAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VMALLE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_ASIDE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAAE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAALE1IS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_RVAALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VMALLE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_ASIDE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAAE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VAALE1ISNXS, CGT_HCR_TTLB_TTLBIS), + SR_TRAP(OP_TLBI_VMALLE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_ASIDE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAAE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAALE1OS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VMALLE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_ASIDE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_VAALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAAE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(OP_TLBI_RVAALE1OSNXS, CGT_HCR_TTLB_TTLBOS), + SR_TRAP(SYS_SCTLR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_TTBR0_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_TTBR1_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_TCR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_ESR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_FAR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_AFSR0_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_AFSR1_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_MAIR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_AMAIR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_CONTEXTIDR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_DC_ZVA, CGT_HCR_TDZ), + SR_TRAP(SYS_DC_GVA, CGT_HCR_TDZ), + SR_TRAP(SYS_DC_GZVA, CGT_HCR_TDZ), + SR_TRAP(SYS_LORSA_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LOREA_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LORN_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LORC_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_LORID_EL1, CGT_HCR_TLOR), + SR_TRAP(SYS_ERRIDR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERRSELR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXADDR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXCTLR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXFR_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC0_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC1_EL1, 
CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC2_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXMISC3_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_ERXSTATUS_EL1, CGT_HCR_TERR), + SR_TRAP(SYS_APIAKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APIAKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APIBKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APIBKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDAKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDAKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDBKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APDBKEYHI_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APGAKEYLO_EL1, CGT_HCR_APK), + SR_TRAP(SYS_APGAKEYHI_EL1, CGT_HCR_APK), + /* All _EL2 registers */ + SR_RANGE_TRAP(sys_reg(3, 4, 0, 0, 0), + sys_reg(3, 4, 3, 15, 7), CGT_HCR_NV), + /* Skip the SP_EL1 encoding... */ + SR_RANGE_TRAP(sys_reg(3, 4, 4, 1, 1), + sys_reg(3, 4, 10, 15, 7), CGT_HCR_NV), + SR_RANGE_TRAP(sys_reg(3, 4, 12, 0, 0), + sys_reg(3, 4, 14, 15, 7), CGT_HCR_NV), + /* All _EL02, _EL12 registers */ + SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0), + sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV), + SR_RANGE_TRAP(sys_reg(3, 5, 12, 0, 0), + sys_reg(3, 5, 14, 15, 7), CGT_HCR_NV), + SR_TRAP(OP_AT_S1E2R, CGT_HCR_NV), + SR_TRAP(OP_AT_S1E2W, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E1R, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E1W, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E0R, CGT_HCR_NV), + SR_TRAP(OP_AT_S12E0W, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1NXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1IS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2ISNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1ISNXS,CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2OS, 
CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2OS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VAE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_ALLE1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VALE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_VMALLS12E1OSNXS,CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2E1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2E1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_IPAS2LE1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RIPAS2LE1OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVAE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_TLBI_RVALE2OSNXS, CGT_HCR_NV), + SR_TRAP(OP_CPP_RCTX, CGT_HCR_NV), + SR_TRAP(OP_DVP_RCTX, CGT_HCR_NV), + SR_TRAP(OP_CFP_RCTX, CGT_HCR_NV), + SR_TRAP(SYS_SP_EL1, CGT_HCR_NV_nNV2), + SR_TRAP(SYS_VBAR_EL1, CGT_HCR_NV1_nNV2), + SR_TRAP(SYS_ELR_EL1, CGT_HCR_NV1_nNV2), + SR_TRAP(SYS_SPSR_EL1, CGT_HCR_NV1_nNV2), + SR_TRAP(SYS_SCXTNUM_EL1, CGT_HCR_NV1_nNV2_ENSCXT), + SR_TRAP(SYS_SCXTNUM_EL0, CGT_HCR_ENSCXT), + SR_TRAP(OP_AT_S1E1R, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E1W, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E0R, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E0W, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E1RP, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E1WP, CGT_HCR_AT), + SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN), + SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN), + SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN), }; static DEFINE_XARRAY(sr_forward_xa); -- cgit From a0b70fb00db83e678f92b8aed0a9a9e4ffcffb82 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:50 +0100 Subject: KVM: arm64: nv: Expose FEAT_EVT to nested guests Now that we properly implement FEAT_EVT (as we correctly forward traps), expose it to guests. Reviewed-by: Eric Auger Reviewed-by: Jing Zhang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230815183903.2735724-17-maz@kernel.org --- arch/arm64/kvm/nested.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 315354d27978..7f80f385d9e8 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -124,8 +124,7 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p, break; case SYS_ID_AA64MMFR2_EL1: - val &= ~(NV_FTR(MMFR2, EVT) | - NV_FTR(MMFR2, BBM) | + val &= ~(NV_FTR(MMFR2, BBM) | NV_FTR(MMFR2, TTL) | GENMASK_ULL(47, 44) | NV_FTR(MMFR2, ST) | -- cgit From cb31632c44529048c052a2961b3adf62a2c89b17 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:51 +0100 Subject: KVM: arm64: nv: Add trap forwarding for MDCR_EL2 Describe the MDCR_EL2 register, and associate it with all the sysregs it allows to trap. Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-18-maz@kernel.org --- arch/arm64/kvm/emulate-nested.c | 268 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 268 insertions(+) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 975a30ef874a..241e44eeed6d 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -67,6 +67,18 @@ enum cgt_group_id { CGT_HCR_TTLBIS, CGT_HCR_TTLBOS, + CGT_MDCR_TPMCR, + CGT_MDCR_TPM, + CGT_MDCR_TDE, + CGT_MDCR_TDA, + CGT_MDCR_TDOSA, + CGT_MDCR_TDRA, + CGT_MDCR_E2PB, + CGT_MDCR_TPMS, + CGT_MDCR_TTRF, + CGT_MDCR_E2TB, + CGT_MDCR_TDCC, + /* * Anything after this point is a combination of coarse trap * controls, which must all be evaluated to decide what to do. 
@@ -80,6 +92,11 @@ enum cgt_group_id { CGT_HCR_TPU_TICAB, CGT_HCR_TPU_TOCU, CGT_HCR_NV1_nNV2_ENSCXT, + CGT_MDCR_TPM_TPMCR, + CGT_MDCR_TDE_TDA, + CGT_MDCR_TDE_TDOSA, + CGT_MDCR_TDE_TDRA, + CGT_MDCR_TDCC_TDE_TDA, /* * Anything after this point requires a callback evaluating a @@ -260,6 +277,72 @@ static const struct trap_bits coarse_trap_bits[] = { .mask = HCR_TTLBOS, .behaviour = BEHAVE_FORWARD_ANY, }, + [CGT_MDCR_TPMCR] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TPMCR, + .mask = MDCR_EL2_TPMCR, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TPM] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TPM, + .mask = MDCR_EL2_TPM, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TDE] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TDE, + .mask = MDCR_EL2_TDE, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TDA] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TDA, + .mask = MDCR_EL2_TDA, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TDOSA] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TDOSA, + .mask = MDCR_EL2_TDOSA, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TDRA] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TDRA, + .mask = MDCR_EL2_TDRA, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_E2PB] = { + .index = MDCR_EL2, + .value = 0, + .mask = BIT(MDCR_EL2_E2PB_SHIFT), + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TPMS] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TPMS, + .mask = MDCR_EL2_TPMS, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TTRF] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TTRF, + .mask = MDCR_EL2_TTRF, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_E2TB] = { + .index = MDCR_EL2, + .value = 0, + .mask = BIT(MDCR_EL2_E2TB_SHIFT), + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_MDCR_TDCC] = { + .index = MDCR_EL2, + .value = MDCR_EL2_TDCC, + .mask = MDCR_EL2_TDCC, + .behaviour = BEHAVE_FORWARD_ANY, + }, }; #define MCB(id, ...) 
\ @@ -277,6 +360,11 @@ static const enum cgt_group_id *coarse_control_combo[] = { MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB), MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU), MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT), + MCB(CGT_MDCR_TPM_TPMCR, CGT_MDCR_TPM, CGT_MDCR_TPMCR), + MCB(CGT_MDCR_TDE_TDA, CGT_MDCR_TDE, CGT_MDCR_TDA), + MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA), + MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA), + MCB(CGT_MDCR_TDCC_TDE_TDA, CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA), }; typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *); @@ -609,6 +697,186 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN), SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN), SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN), + SR_TRAP(SYS_PMCR_EL0, CGT_MDCR_TPM_TPMCR), + SR_TRAP(SYS_PMCNTENSET_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCNTENCLR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMOVSSET_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMOVSCLR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCEID0_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCEID1_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMSWINC_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMSELR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMCCNTR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMUSERENR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_PMINTENSET_EL1, CGT_MDCR_TPM), + SR_TRAP(SYS_PMINTENCLR_EL1, CGT_MDCR_TPM), + SR_TRAP(SYS_PMMIR_EL1, CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM), + 
SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM), + SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM), + SR_TRAP(SYS_PMCCFILTR_EL0, CGT_MDCR_TPM), + SR_TRAP(SYS_MDCCSR_EL0, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_MDCCINT_EL1, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_OSDTRRX_EL1, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_OSDTRTX_EL1, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_DBGDTR_EL0, CGT_MDCR_TDCC_TDE_TDA), + /* + * Also covers DBGDTRRX_EL0, which has the same encoding as + * SYS_DBGDTRTX_EL0... + */ + SR_TRAP(SYS_DBGDTRTX_EL0, CGT_MDCR_TDCC_TDE_TDA), + SR_TRAP(SYS_MDSCR_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_OSECCR_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(8), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBVRn_EL1(15), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(8), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGBCRn_EL1(15), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(8), CGT_MDCR_TDE_TDA), + 
SR_TRAP(SYS_DBGWVRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWVRn_EL1(15), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(0), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(1), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(2), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(3), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(4), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(5), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(6), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(7), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(8), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(9), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(10), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(11), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(12), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(13), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGWCRn_EL1(14), CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGCLAIMSET_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGCLAIMCLR_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_DBGAUTHSTATUS_EL1, CGT_MDCR_TDE_TDA), + SR_TRAP(SYS_OSLAR_EL1, CGT_MDCR_TDE_TDOSA), + SR_TRAP(SYS_OSLSR_EL1, CGT_MDCR_TDE_TDOSA), + SR_TRAP(SYS_OSDLR_EL1, CGT_MDCR_TDE_TDOSA), + SR_TRAP(SYS_DBGPRCR_EL1, CGT_MDCR_TDE_TDOSA), + SR_TRAP(SYS_MDRAR_EL1, CGT_MDCR_TDE_TDRA), + SR_TRAP(SYS_PMBLIMITR_EL1, CGT_MDCR_E2PB), + SR_TRAP(SYS_PMBPTR_EL1, CGT_MDCR_E2PB), + SR_TRAP(SYS_PMBSR_EL1, CGT_MDCR_E2PB), + SR_TRAP(SYS_PMSCR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSEVFR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSFCR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSICR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSIDR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSIRR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSLATFR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_PMSNEVFR_EL1, CGT_MDCR_TPMS), + SR_TRAP(SYS_TRFCR_EL1, CGT_MDCR_TTRF), + SR_TRAP(SYS_TRBBASER_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBLIMITR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBMAR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBPTR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBSR_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_TRBTRG_EL1, CGT_MDCR_E2TB), }; static DEFINE_XARRAY(sr_forward_xa); -- cgit From e880bd3363237ed8abbe623d1b49d59d5f6fe0d1 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:52 +0100 Subject: KVM: arm64: nv: Add trap forwarding for CNTHCTL_EL2 Describe the CNTHCTL_EL2 register, and associate it with all the sysregs it allows to trap. Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-19-maz@kernel.org --- arch/arm64/kvm/emulate-nested.c | 50 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 241e44eeed6d..860910386b5b 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -100,9 +100,11 @@ enum cgt_group_id { /* * Anything after this point requires a callback evaluating a - * complex trap condition. Hopefully we'll never need this... + * complex trap condition. Ugly stuff. 
*/ __COMPLEX_CONDITIONS__, + CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__, + CGT_CNTHCTL_EL1PTEN, /* Must be last */ __NR_CGT_GROUP_IDS__ @@ -369,10 +371,51 @@ static const enum cgt_group_id *coarse_control_combo[] = { typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *); +/* + * Warning, maximum confusion ahead. + * + * When E2H=0, CNTHCTL_EL2[1:0] are defined as EL1PCEN:EL1PCTEN + * When E2H=1, CNTHCTL_EL2[11:10] are defined as EL1PTEN:EL1PCTEN + * + * Note the single letter difference? Yet, the bits have the same + * function despite a different layout and a different name. + * + * We don't try to reconcile this mess. We just use the E2H=0 bits + * to generate something that is in the E2H=1 format, and live with + * it. You're welcome. + */ +static u64 get_sanitized_cnthctl(struct kvm_vcpu *vcpu) +{ + u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2); + + if (!vcpu_el2_e2h_is_set(vcpu)) + val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10; + + return val & ((CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN) << 10); +} + +static enum trap_behaviour check_cnthctl_el1pcten(struct kvm_vcpu *vcpu) +{ + if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCTEN << 10)) + return BEHAVE_HANDLE_LOCALLY; + + return BEHAVE_FORWARD_ANY; +} + +static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu) +{ + if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCEN << 10)) + return BEHAVE_HANDLE_LOCALLY; + + return BEHAVE_FORWARD_ANY; +} + #define CCC(id, fn) \ [id - __COMPLEX_CONDITIONS__] = fn static const complex_condition_check ccc[] = { + CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten), + CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten), }; /* @@ -877,6 +920,11 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(SYS_TRBPTR_EL1, CGT_MDCR_E2TB), SR_TRAP(SYS_TRBSR_EL1, CGT_MDCR_E2TB), SR_TRAP(SYS_TRBTRG_EL1, CGT_MDCR_E2TB), + SR_TRAP(SYS_CNTP_TVAL_EL0, CGT_CNTHCTL_EL1PTEN), + SR_TRAP(SYS_CNTP_CVAL_EL0, CGT_CNTHCTL_EL1PTEN), + SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN), + SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN), + SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN), }; static DEFINE_XARRAY(sr_forward_xa); -- cgit From 15b4d82d69d7b0e5833b7a023dff3d7bbae5ccfc Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:53 +0100 Subject: KVM: arm64: nv: Add fine grained trap forwarding infrastructure Fine Grained Traps are fun. Not. Implement the fine grained trap forwarding, reusing the Coarse Grained Traps infrastructure previously implemented. Each sysreg/instruction inserted in the xarray gets an FGT group (vaguely equivalent to a register number), a bit number in that register, and a polarity. It is then pretty easy to check the FGT state at handling time, just like we do for the coarse version (it is just faster). 
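To make the packing concrete, here is a minimal userspace model of the layout described in the patch and of the polarity test (C bit-field ordering is compiler-specific, so treat this purely as illustration; the kernel pins the real layout down with BUILD_BUG_ON):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union trap_config {
	uint64_t val;
	struct {
		uint64_t cgt:10;	/* Coarse Grained Trap id */
		uint64_t fgt:4;		/* Fine Grained Trap group */
		uint64_t bit:6;		/* bit number in the FGT register */
		uint64_t pol:1;		/* polarity: trap when bit == pol */
		uint64_t unused:42;
		uint64_t mbz:1;		/* lost on insertion in the xarray */
	};
};

/* The trap-time test: compare the guest hypervisor's FGT register bit
 * against the configured polarity, as check_fgt_bit() does. */
static int check_fgt_bit(uint64_t reg, union trap_config tc)
{
	return ((reg >> tc.bit) & 1) == tc.pol;
}

int main(void)
{
	/* Hypothetical entry: group 1, bit 3, positive polarity */
	union trap_config tc = { .fgt = 1, .bit = 3, .pol = 1 };

	assert(sizeof(union trap_config) == sizeof(uint64_t));
	printf("bit 3 set:   %d\n", check_fgt_bit(UINT64_C(1) << 3, tc)); /* 1: forward */
	printf("bit 3 clear: %d\n", check_fgt_bit(0, tc));                /* 0: local   */
	return 0;
}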
Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-20-maz@kernel.org --- arch/arm64/kvm/emulate-nested.c | 90 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 860910386b5b..0da9d92ed921 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -423,16 +423,23 @@ static const complex_condition_check ccc[] = { * following layout for each trapped sysreg: * * [9:0] enum cgt_group_id (10 bits) - * [62:10] Unused (53 bits) + * [13:10] enum fgt_group_id (4 bits) + * [19:14] bit number in the FGT register (6 bits) + * [20] trap polarity (1 bit) + * [62:21] Unused (42 bits) * [63] RES0 - Must be zero, as lost on insertion in the xarray */ #define TC_CGT_BITS 10 +#define TC_FGT_BITS 4 union trap_config { u64 val; struct { unsigned long cgt:TC_CGT_BITS; /* Coarse Grained Trap id */ - unsigned long unused:53; /* Unused, should be zero */ + unsigned long fgt:TC_FGT_BITS; /* Fine Grained Trap id */ + unsigned long bit:6; /* Bit number */ + unsigned long pol:1; /* Polarity */ + unsigned long unused:42; /* Unused, should be zero */ unsigned long mbz:1; /* Must Be Zero */ }; }; @@ -929,6 +936,28 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { static DEFINE_XARRAY(sr_forward_xa); +enum fgt_group_id { + __NO_FGT_GROUP__, + + /* Must be last */ + __NR_FGT_GROUP_IDS__ +}; + +#define SR_FGT(sr, g, b, p) \ + { \ + .encoding = sr, \ + .end = sr, \ + .tc = { \ + .fgt = g ## _GROUP, \ + .bit = g ## _EL2_ ## b ## _SHIFT, \ + .pol = p, \ + }, \ + .line = __LINE__, \ + } + +static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { +}; + static union trap_config get_trap_config(u32 sysreg) { return (union trap_config) { @@ -957,6 +986,7 @@ int __init populate_nv_trap_config(void) BUILD_BUG_ON(sizeof(union trap_config) != sizeof(void *)); BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS)); + BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS)); for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) { const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i]; @@ -990,6 +1020,34 @@ int __init populate_nv_trap_config(void) kvm_info("nv: %ld coarse grained trap handlers\n", ARRAY_SIZE(encoding_to_cgt)); + if (!cpus_have_final_cap(ARM64_HAS_FGT)) + goto check_mcb; + + for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) { + const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i]; + union trap_config tc; + + if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) { + ret = -EINVAL; + print_nv_trap_error(fgt, "Invalid FGT", ret); + } + + tc = get_trap_config(fgt->encoding); + + if (tc.fgt) { + ret = -EINVAL; + print_nv_trap_error(fgt, "Duplicate FGT", ret); + } + + tc.val |= fgt->tc.val; + xa_store(&sr_forward_xa, fgt->encoding, + xa_mk_value(tc.val), GFP_KERNEL); + } + + kvm_info("nv: %ld fine grained trap handlers\n", + ARRAY_SIZE(encoding_to_fgt)); + +check_mcb: for (int id = __MULTIPLE_CONTROL_BITS__; id < __COMPLEX_CONDITIONS__; id++) { const enum cgt_group_id *cgids; @@ -1056,13 +1114,26 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu, return __compute_trap_behaviour(vcpu, tc.cgt, b); } +static bool check_fgt_bit(u64 val, const union trap_config tc) +{ + return ((val >> tc.bit) & 1) == tc.pol; +} + +#define sanitised_sys_reg(vcpu, reg) \ + ({ \ + u64 __val; \ + __val = __vcpu_sys_reg(vcpu, reg); \ + __val &= 
~__ ## reg ## _RES0; \ + (__val); \ + }) + bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) { union trap_config tc; enum trap_behaviour b; bool is_read; u32 sysreg; - u64 esr; + u64 esr, val; if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) return false; @@ -1085,6 +1156,19 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) if (!tc.val) return false; + switch ((enum fgt_group_id)tc.fgt) { + case __NO_FGT_GROUP__: + break; + + case __NR_FGT_GROUP_IDS__: + /* Something is really wrong, bail out */ + WARN_ONCE(1, "__NR_FGT_GROUP_IDS__"); + return false; + } + + if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc)) + goto inject; + b = compute_trap_behaviour(vcpu, tc); if (((b & BEHAVE_FORWARD_READ) && is_read) || -- cgit From 5a24ea7869857251a83da1512209f76003bc09db Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:54 +0100 Subject: KVM: arm64: nv: Add trap forwarding for HFGxTR_EL2 Implement the trap forwarding for traps described by HFGxTR_EL2, reusing the Fine Grained Traps infrastructure previously implemented. Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-21-maz@kernel.org --- arch/arm64/kvm/emulate-nested.c | 71 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 0da9d92ed921..0e34797515b6 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -938,6 +938,7 @@ static DEFINE_XARRAY(sr_forward_xa); enum fgt_group_id { __NO_FGT_GROUP__, + HFGxTR_GROUP, /* Must be last */ __NR_FGT_GROUP_IDS__ @@ -956,6 +957,69 @@ enum fgt_group_id { } static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { + /* HFGRTR_EL2, HFGWTR_EL2 */ + SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0), + SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0), + SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0), + SR_FGT(SYS_ERXADDR_EL1, HFGxTR, ERXADDR_EL1, 1), + SR_FGT(SYS_ERXPFGCDN_EL1, HFGxTR, ERXPFGCDN_EL1, 1), + SR_FGT(SYS_ERXPFGCTL_EL1, HFGxTR, ERXPFGCTL_EL1, 1), + SR_FGT(SYS_ERXPFGF_EL1, HFGxTR, ERXPFGF_EL1, 1), + SR_FGT(SYS_ERXMISC0_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC1_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC2_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC3_EL1, HFGxTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXSTATUS_EL1, HFGxTR, ERXSTATUS_EL1, 1), + SR_FGT(SYS_ERXCTLR_EL1, HFGxTR, ERXCTLR_EL1, 1), + SR_FGT(SYS_ERXFR_EL1, HFGxTR, ERXFR_EL1, 1), + SR_FGT(SYS_ERRSELR_EL1, HFGxTR, ERRSELR_EL1, 1), + SR_FGT(SYS_ERRIDR_EL1, HFGxTR, ERRIDR_EL1, 1), + SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGxTR, ICC_IGRPENn_EL1, 1), + SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGxTR, ICC_IGRPENn_EL1, 1), + SR_FGT(SYS_VBAR_EL1, HFGxTR, VBAR_EL1, 1), + SR_FGT(SYS_TTBR1_EL1, HFGxTR, TTBR1_EL1, 1), + SR_FGT(SYS_TTBR0_EL1, HFGxTR, TTBR0_EL1, 1), + SR_FGT(SYS_TPIDR_EL0, HFGxTR, TPIDR_EL0, 1), + SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1), + SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1), + SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1), + SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1), + SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1), + SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1), + SR_FGT(SYS_REVIDR_EL1, HFGxTR, REVIDR_EL1, 1), + SR_FGT(SYS_PAR_EL1, HFGxTR, PAR_EL1, 1), + SR_FGT(SYS_MPIDR_EL1, HFGxTR, MPIDR_EL1, 1), + SR_FGT(SYS_MIDR_EL1, HFGxTR, MIDR_EL1, 1), + SR_FGT(SYS_MAIR_EL1, HFGxTR, MAIR_EL1, 1), + SR_FGT(SYS_LORSA_EL1, HFGxTR, LORSA_EL1, 1), + SR_FGT(SYS_LORN_EL1, HFGxTR, LORN_EL1, 
1), + SR_FGT(SYS_LORID_EL1, HFGxTR, LORID_EL1, 1), + SR_FGT(SYS_LOREA_EL1, HFGxTR, LOREA_EL1, 1), + SR_FGT(SYS_LORC_EL1, HFGxTR, LORC_EL1, 1), + SR_FGT(SYS_ISR_EL1, HFGxTR, ISR_EL1, 1), + SR_FGT(SYS_FAR_EL1, HFGxTR, FAR_EL1, 1), + SR_FGT(SYS_ESR_EL1, HFGxTR, ESR_EL1, 1), + SR_FGT(SYS_DCZID_EL0, HFGxTR, DCZID_EL0, 1), + SR_FGT(SYS_CTR_EL0, HFGxTR, CTR_EL0, 1), + SR_FGT(SYS_CSSELR_EL1, HFGxTR, CSSELR_EL1, 1), + SR_FGT(SYS_CPACR_EL1, HFGxTR, CPACR_EL1, 1), + SR_FGT(SYS_CONTEXTIDR_EL1, HFGxTR, CONTEXTIDR_EL1, 1), + SR_FGT(SYS_CLIDR_EL1, HFGxTR, CLIDR_EL1, 1), + SR_FGT(SYS_CCSIDR_EL1, HFGxTR, CCSIDR_EL1, 1), + SR_FGT(SYS_APIBKEYLO_EL1, HFGxTR, APIBKey, 1), + SR_FGT(SYS_APIBKEYHI_EL1, HFGxTR, APIBKey, 1), + SR_FGT(SYS_APIAKEYLO_EL1, HFGxTR, APIAKey, 1), + SR_FGT(SYS_APIAKEYHI_EL1, HFGxTR, APIAKey, 1), + SR_FGT(SYS_APGAKEYLO_EL1, HFGxTR, APGAKey, 1), + SR_FGT(SYS_APGAKEYHI_EL1, HFGxTR, APGAKey, 1), + SR_FGT(SYS_APDBKEYLO_EL1, HFGxTR, APDBKey, 1), + SR_FGT(SYS_APDBKEYHI_EL1, HFGxTR, APDBKey, 1), + SR_FGT(SYS_APDAKEYLO_EL1, HFGxTR, APDAKey, 1), + SR_FGT(SYS_APDAKEYHI_EL1, HFGxTR, APDAKey, 1), + SR_FGT(SYS_AMAIR_EL1, HFGxTR, AMAIR_EL1, 1), + SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1), + SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1), + SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1), }; static union trap_config get_trap_config(u32 sysreg) @@ -1160,6 +1224,13 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) case __NO_FGT_GROUP__: break; + case HFGxTR_GROUP: + if (is_read) + val = sanitised_sys_reg(vcpu, HFGRTR_EL2); + else + val = sanitised_sys_reg(vcpu, HFGWTR_EL2); + break; + case __NR_FGT_GROUP_IDS__: /* Something is really wrong, bail out */ WARN_ONCE(1, "__NR_FGT_GROUP_IDS__"); -- cgit From 039f9f12de5fc761d2b32fa072071533aa8cbb3b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:55 +0100 Subject: KVM: arm64: nv: Add trap forwarding for HFGITR_EL2 Similarly, implement the trap forwarding for instructions affected by HFGITR_EL2. Note that the TLBI*nXS instructions should be affected by HCRX_EL2, which will be dealt with down the line. Also, ERET* and SVC traps are handled separately. 
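For the polarity column in particular, a tiny sketch shows why the "n"-prefixed entries use 0 (the bit positions below are invented; the real shifts come from the HFGITR_EL2 field definitions used by SR_FGT): positive bits such as TLBIVAE1 trap when set, while negative bits such as nBRBIALL trap when clear:

#include <stdint.h>
#include <stdio.h>

/* Invented positions; the real ones would be the generated
 * HFGITR_EL2_TLBIVAE1_SHIFT / HFGITR_EL2_nBRBIALL_SHIFT values. */
#define TLBIVAE1_SHIFT	 1
#define nBRBIALL_SHIFT	55

static int traps(uint64_t hfgitr, unsigned int bit, unsigned int pol)
{
	return ((hfgitr >> bit) & 1) == pol;	/* same test as check_fgt_bit() */
}

int main(void)
{
	uint64_t hfgitr = 0;			/* guest hypervisor wrote all zeroes */

	printf("TLBI VAE1: %d\n", traps(hfgitr, TLBIVAE1_SHIFT, 1));  /* 0: local   */
	printf("BRB IALL:  %d\n", traps(hfgitr, nBRBIALL_SHIFT, 0));  /* 1: forward */
	return 0;
}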
Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-22-maz@kernel.org --- arch/arm64/include/asm/kvm_arm.h | 4 ++ arch/arm64/kvm/emulate-nested.c | 109 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 85908aa18908..809bc86acefd 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -354,6 +354,10 @@ #define __HFGWTR_EL2_MASK GENMASK(49, 0) #define __HFGWTR_EL2_nMASK (GENMASK(55, 54) | BIT(50)) +#define __HFGITR_EL2_RES0 GENMASK(63, 57) +#define __HFGITR_EL2_MASK GENMASK(54, 0) +#define __HFGITR_EL2_nMASK GENMASK(56, 55) + /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) /* diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 0e34797515b6..a1a7792db412 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -939,6 +939,7 @@ static DEFINE_XARRAY(sr_forward_xa); enum fgt_group_id { __NO_FGT_GROUP__, HFGxTR_GROUP, + HFGITR_GROUP, /* Must be last */ __NR_FGT_GROUP_IDS__ @@ -1020,6 +1021,110 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1), SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1), SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1), + /* HFGITR_EL2 */ + SR_FGT(OP_BRB_IALL, HFGITR, nBRBIALL, 0), + SR_FGT(OP_BRB_INJ, HFGITR, nBRBINJ, 0), + SR_FGT(SYS_DC_CVAC, HFGITR, DCCVAC, 1), + SR_FGT(SYS_DC_CGVAC, HFGITR, DCCVAC, 1), + SR_FGT(SYS_DC_CGDVAC, HFGITR, DCCVAC, 1), + SR_FGT(OP_CPP_RCTX, HFGITR, CPPRCTX, 1), + SR_FGT(OP_DVP_RCTX, HFGITR, DVPRCTX, 1), + SR_FGT(OP_CFP_RCTX, HFGITR, CFPRCTX, 1), + SR_FGT(OP_TLBI_VAALE1, HFGITR, TLBIVAALE1, 1), + SR_FGT(OP_TLBI_VALE1, HFGITR, TLBIVALE1, 1), + SR_FGT(OP_TLBI_VAAE1, HFGITR, TLBIVAAE1, 1), + SR_FGT(OP_TLBI_ASIDE1, HFGITR, TLBIASIDE1, 1), + SR_FGT(OP_TLBI_VAE1, HFGITR, TLBIVAE1, 1), + SR_FGT(OP_TLBI_VMALLE1, HFGITR, TLBIVMALLE1, 1), + SR_FGT(OP_TLBI_RVAALE1, HFGITR, TLBIRVAALE1, 1), + SR_FGT(OP_TLBI_RVALE1, HFGITR, TLBIRVALE1, 1), + SR_FGT(OP_TLBI_RVAAE1, HFGITR, TLBIRVAAE1, 1), + SR_FGT(OP_TLBI_RVAE1, HFGITR, TLBIRVAE1, 1), + SR_FGT(OP_TLBI_RVAALE1IS, HFGITR, TLBIRVAALE1IS, 1), + SR_FGT(OP_TLBI_RVALE1IS, HFGITR, TLBIRVALE1IS, 1), + SR_FGT(OP_TLBI_RVAAE1IS, HFGITR, TLBIRVAAE1IS, 1), + SR_FGT(OP_TLBI_RVAE1IS, HFGITR, TLBIRVAE1IS, 1), + SR_FGT(OP_TLBI_VAALE1IS, HFGITR, TLBIVAALE1IS, 1), + SR_FGT(OP_TLBI_VALE1IS, HFGITR, TLBIVALE1IS, 1), + SR_FGT(OP_TLBI_VAAE1IS, HFGITR, TLBIVAAE1IS, 1), + SR_FGT(OP_TLBI_ASIDE1IS, HFGITR, TLBIASIDE1IS, 1), + SR_FGT(OP_TLBI_VAE1IS, HFGITR, TLBIVAE1IS, 1), + SR_FGT(OP_TLBI_VMALLE1IS, HFGITR, TLBIVMALLE1IS, 1), + SR_FGT(OP_TLBI_RVAALE1OS, HFGITR, TLBIRVAALE1OS, 1), + SR_FGT(OP_TLBI_RVALE1OS, HFGITR, TLBIRVALE1OS, 1), + SR_FGT(OP_TLBI_RVAAE1OS, HFGITR, TLBIRVAAE1OS, 1), + SR_FGT(OP_TLBI_RVAE1OS, HFGITR, TLBIRVAE1OS, 1), + SR_FGT(OP_TLBI_VAALE1OS, HFGITR, TLBIVAALE1OS, 1), + SR_FGT(OP_TLBI_VALE1OS, HFGITR, TLBIVALE1OS, 1), + SR_FGT(OP_TLBI_VAAE1OS, HFGITR, TLBIVAAE1OS, 1), + SR_FGT(OP_TLBI_ASIDE1OS, HFGITR, TLBIASIDE1OS, 1), + SR_FGT(OP_TLBI_VAE1OS, HFGITR, TLBIVAE1OS, 1), + SR_FGT(OP_TLBI_VMALLE1OS, HFGITR, TLBIVMALLE1OS, 1), + /* FIXME: nXS variants must be checked against HCRX_EL2.FGTnXS */ + SR_FGT(OP_TLBI_VAALE1NXS, HFGITR, TLBIVAALE1, 1), + SR_FGT(OP_TLBI_VALE1NXS, HFGITR, TLBIVALE1, 1), + SR_FGT(OP_TLBI_VAAE1NXS, HFGITR, TLBIVAAE1, 1), + 
SR_FGT(OP_TLBI_ASIDE1NXS, HFGITR, TLBIASIDE1, 1), + SR_FGT(OP_TLBI_VAE1NXS, HFGITR, TLBIVAE1, 1), + SR_FGT(OP_TLBI_VMALLE1NXS, HFGITR, TLBIVMALLE1, 1), + SR_FGT(OP_TLBI_RVAALE1NXS, HFGITR, TLBIRVAALE1, 1), + SR_FGT(OP_TLBI_RVALE1NXS, HFGITR, TLBIRVALE1, 1), + SR_FGT(OP_TLBI_RVAAE1NXS, HFGITR, TLBIRVAAE1, 1), + SR_FGT(OP_TLBI_RVAE1NXS, HFGITR, TLBIRVAE1, 1), + SR_FGT(OP_TLBI_RVAALE1ISNXS, HFGITR, TLBIRVAALE1IS, 1), + SR_FGT(OP_TLBI_RVALE1ISNXS, HFGITR, TLBIRVALE1IS, 1), + SR_FGT(OP_TLBI_RVAAE1ISNXS, HFGITR, TLBIRVAAE1IS, 1), + SR_FGT(OP_TLBI_RVAE1ISNXS, HFGITR, TLBIRVAE1IS, 1), + SR_FGT(OP_TLBI_VAALE1ISNXS, HFGITR, TLBIVAALE1IS, 1), + SR_FGT(OP_TLBI_VALE1ISNXS, HFGITR, TLBIVALE1IS, 1), + SR_FGT(OP_TLBI_VAAE1ISNXS, HFGITR, TLBIVAAE1IS, 1), + SR_FGT(OP_TLBI_ASIDE1ISNXS, HFGITR, TLBIASIDE1IS, 1), + SR_FGT(OP_TLBI_VAE1ISNXS, HFGITR, TLBIVAE1IS, 1), + SR_FGT(OP_TLBI_VMALLE1ISNXS, HFGITR, TLBIVMALLE1IS, 1), + SR_FGT(OP_TLBI_RVAALE1OSNXS, HFGITR, TLBIRVAALE1OS, 1), + SR_FGT(OP_TLBI_RVALE1OSNXS, HFGITR, TLBIRVALE1OS, 1), + SR_FGT(OP_TLBI_RVAAE1OSNXS, HFGITR, TLBIRVAAE1OS, 1), + SR_FGT(OP_TLBI_RVAE1OSNXS, HFGITR, TLBIRVAE1OS, 1), + SR_FGT(OP_TLBI_VAALE1OSNXS, HFGITR, TLBIVAALE1OS, 1), + SR_FGT(OP_TLBI_VALE1OSNXS, HFGITR, TLBIVALE1OS, 1), + SR_FGT(OP_TLBI_VAAE1OSNXS, HFGITR, TLBIVAAE1OS, 1), + SR_FGT(OP_TLBI_ASIDE1OSNXS, HFGITR, TLBIASIDE1OS, 1), + SR_FGT(OP_TLBI_VAE1OSNXS, HFGITR, TLBIVAE1OS, 1), + SR_FGT(OP_TLBI_VMALLE1OSNXS, HFGITR, TLBIVMALLE1OS, 1), + SR_FGT(OP_AT_S1E1WP, HFGITR, ATS1E1WP, 1), + SR_FGT(OP_AT_S1E1RP, HFGITR, ATS1E1RP, 1), + SR_FGT(OP_AT_S1E0W, HFGITR, ATS1E0W, 1), + SR_FGT(OP_AT_S1E0R, HFGITR, ATS1E0R, 1), + SR_FGT(OP_AT_S1E1W, HFGITR, ATS1E1W, 1), + SR_FGT(OP_AT_S1E1R, HFGITR, ATS1E1R, 1), + SR_FGT(SYS_DC_ZVA, HFGITR, DCZVA, 1), + SR_FGT(SYS_DC_GVA, HFGITR, DCZVA, 1), + SR_FGT(SYS_DC_GZVA, HFGITR, DCZVA, 1), + SR_FGT(SYS_DC_CIVAC, HFGITR, DCCIVAC, 1), + SR_FGT(SYS_DC_CIGVAC, HFGITR, DCCIVAC, 1), + SR_FGT(SYS_DC_CIGDVAC, HFGITR, DCCIVAC, 1), + SR_FGT(SYS_DC_CVADP, HFGITR, DCCVADP, 1), + SR_FGT(SYS_DC_CGVADP, HFGITR, DCCVADP, 1), + SR_FGT(SYS_DC_CGDVADP, HFGITR, DCCVADP, 1), + SR_FGT(SYS_DC_CVAP, HFGITR, DCCVAP, 1), + SR_FGT(SYS_DC_CGVAP, HFGITR, DCCVAP, 1), + SR_FGT(SYS_DC_CGDVAP, HFGITR, DCCVAP, 1), + SR_FGT(SYS_DC_CVAU, HFGITR, DCCVAU, 1), + SR_FGT(SYS_DC_CISW, HFGITR, DCCISW, 1), + SR_FGT(SYS_DC_CIGSW, HFGITR, DCCISW, 1), + SR_FGT(SYS_DC_CIGDSW, HFGITR, DCCISW, 1), + SR_FGT(SYS_DC_CSW, HFGITR, DCCSW, 1), + SR_FGT(SYS_DC_CGSW, HFGITR, DCCSW, 1), + SR_FGT(SYS_DC_CGDSW, HFGITR, DCCSW, 1), + SR_FGT(SYS_DC_ISW, HFGITR, DCISW, 1), + SR_FGT(SYS_DC_IGSW, HFGITR, DCISW, 1), + SR_FGT(SYS_DC_IGDSW, HFGITR, DCISW, 1), + SR_FGT(SYS_DC_IVAC, HFGITR, DCIVAC, 1), + SR_FGT(SYS_DC_IGVAC, HFGITR, DCIVAC, 1), + SR_FGT(SYS_DC_IGDVAC, HFGITR, DCIVAC, 1), + SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1), + SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1), + SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1), }; static union trap_config get_trap_config(u32 sysreg) @@ -1231,6 +1336,10 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) val = sanitised_sys_reg(vcpu, HFGWTR_EL2); break; + case HFGITR_GROUP: + val = sanitised_sys_reg(vcpu, HFGITR_EL2); + break; + case __NR_FGT_GROUP_IDS__: /* Something is really wrong, bail out */ WARN_ONCE(1, "__NR_FGT_GROUP_IDS__"); -- cgit From d0be0b2ede13247c53745d50e2a5993f2b27c802 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:56 +0100 Subject: KVM: arm64: nv: Add trap forwarding for HDFGxTR_EL2 ... 
and finally, the Debug version of FGT, with its *enormous* list of trapped registers. Reviewed-by: Suzuki K Poulose Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-23-maz@kernel.org --- arch/arm64/include/asm/kvm_arm.h | 11 + arch/arm64/kvm/emulate-nested.c | 474 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 485 insertions(+) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 809bc86acefd..d229f238c3b6 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -358,6 +358,17 @@ #define __HFGITR_EL2_MASK GENMASK(54, 0) #define __HFGITR_EL2_nMASK GENMASK(56, 55) +#define __HDFGRTR_EL2_RES0 (BIT(49) | BIT(42) | GENMASK(39, 38) | \ + GENMASK(21, 20) | BIT(8)) +#define __HDFGRTR_EL2_MASK ~__HDFGRTR_EL2_nMASK +#define __HDFGRTR_EL2_nMASK GENMASK(62, 59) + +#define __HDFGWTR_EL2_RES0 (BIT(63) | GENMASK(59, 58) | BIT(51) | BIT(47) | \ + BIT(43) | GENMASK(40, 38) | BIT(34) | BIT(30) | \ + BIT(22) | BIT(9) | BIT(6)) +#define __HDFGWTR_EL2_MASK ~__HDFGWTR_EL2_nMASK +#define __HDFGWTR_EL2_nMASK GENMASK(62, 60) + /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) /* diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index a1a7792db412..c9662f9a345e 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -939,6 +939,8 @@ static DEFINE_XARRAY(sr_forward_xa); enum fgt_group_id { __NO_FGT_GROUP__, HFGxTR_GROUP, + HDFGRTR_GROUP, + HDFGWTR_GROUP, HFGITR_GROUP, /* Must be last */ @@ -1125,6 +1127,470 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1), SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1), SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1), + /* HDFGRTR_EL2 */ + SR_FGT(SYS_PMBIDR_EL1, HDFGRTR, PMBIDR_EL1, 1), + SR_FGT(SYS_PMSNEVFR_EL1, HDFGRTR, nPMSNEVFR_EL1, 0), + SR_FGT(SYS_BRBINF_EL1(0), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(1), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(2), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(3), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(4), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(5), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(6), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(7), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(8), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(9), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(10), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(11), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(12), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(13), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(14), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(15), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(16), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(17), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(18), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(19), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(20), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(21), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(22), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(23), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(24), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(25), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(26), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(27), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(28), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(29), HDFGRTR, nBRBDATA, 0), + 
SR_FGT(SYS_BRBINF_EL1(30), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINF_EL1(31), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBINFINJ_EL1, HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(0), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(1), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(2), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(3), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(4), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(5), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(6), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(7), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(8), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(9), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(10), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(11), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(12), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(13), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(14), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(15), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(16), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(17), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(18), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(19), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(20), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(21), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(22), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(23), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(24), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(25), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(26), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(27), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(28), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(29), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(30), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRC_EL1(31), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBSRCINJ_EL1, HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(0), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(1), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(2), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(3), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(4), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(5), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(6), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(7), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(8), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(9), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(10), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(11), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(12), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(13), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(14), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(15), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(16), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(17), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(18), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(19), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(20), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(21), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(22), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(23), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(24), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(25), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(26), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(27), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(28), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(29), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(30), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGT_EL1(31), HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBTGTINJ_EL1, HDFGRTR, 
nBRBDATA, 0), + SR_FGT(SYS_BRBTS_EL1, HDFGRTR, nBRBDATA, 0), + SR_FGT(SYS_BRBCR_EL1, HDFGRTR, nBRBCTL, 0), + SR_FGT(SYS_BRBFCR_EL1, HDFGRTR, nBRBCTL, 0), + SR_FGT(SYS_BRBIDR0_EL1, HDFGRTR, nBRBIDR, 0), + SR_FGT(SYS_PMCEID0_EL0, HDFGRTR, PMCEIDn_EL0, 1), + SR_FGT(SYS_PMCEID1_EL0, HDFGRTR, PMCEIDn_EL0, 1), + SR_FGT(SYS_PMUSERENR_EL0, HDFGRTR, PMUSERENR_EL0, 1), + SR_FGT(SYS_TRBTRG_EL1, HDFGRTR, TRBTRG_EL1, 1), + SR_FGT(SYS_TRBSR_EL1, HDFGRTR, TRBSR_EL1, 1), + SR_FGT(SYS_TRBPTR_EL1, HDFGRTR, TRBPTR_EL1, 1), + SR_FGT(SYS_TRBMAR_EL1, HDFGRTR, TRBMAR_EL1, 1), + SR_FGT(SYS_TRBLIMITR_EL1, HDFGRTR, TRBLIMITR_EL1, 1), + SR_FGT(SYS_TRBIDR_EL1, HDFGRTR, TRBIDR_EL1, 1), + SR_FGT(SYS_TRBBASER_EL1, HDFGRTR, TRBBASER_EL1, 1), + SR_FGT(SYS_TRCVICTLR, HDFGRTR, TRCVICTLR, 1), + SR_FGT(SYS_TRCSTATR, HDFGRTR, TRCSTATR, 1), + SR_FGT(SYS_TRCSSCSR(0), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(1), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(2), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(3), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(4), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(5), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(6), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSSCSR(7), HDFGRTR, TRCSSCSRn, 1), + SR_FGT(SYS_TRCSEQSTR, HDFGRTR, TRCSEQSTR, 1), + SR_FGT(SYS_TRCPRGCTLR, HDFGRTR, TRCPRGCTLR, 1), + SR_FGT(SYS_TRCOSLSR, HDFGRTR, TRCOSLSR, 1), + SR_FGT(SYS_TRCIMSPEC(0), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(1), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(2), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(3), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(4), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(5), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(6), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCIMSPEC(7), HDFGRTR, TRCIMSPECn, 1), + SR_FGT(SYS_TRCDEVARCH, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCDEVID, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR0, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR1, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR2, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR3, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR4, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR5, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR6, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR7, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR8, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR9, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR10, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR11, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR12, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCIDR13, HDFGRTR, TRCID, 1), + SR_FGT(SYS_TRCCNTVR(0), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCNTVR(1), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCNTVR(2), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCNTVR(3), HDFGRTR, TRCCNTVRn, 1), + SR_FGT(SYS_TRCCLAIMCLR, HDFGRTR, TRCCLAIM, 1), + SR_FGT(SYS_TRCCLAIMSET, HDFGRTR, TRCCLAIM, 1), + SR_FGT(SYS_TRCAUXCTLR, HDFGRTR, TRCAUXCTLR, 1), + SR_FGT(SYS_TRCAUTHSTATUS, HDFGRTR, TRCAUTHSTATUS, 1), + SR_FGT(SYS_TRCACATR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(8), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(9), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(10), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(11), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(12), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(13), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(14), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACATR(15), 
HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(8), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(9), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(10), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(11), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(12), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(13), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(14), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCACVR(15), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCBBCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCCCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCCTLR0, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCCTLR1, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCIDCVR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTCTLR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCNTRLDVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCCONFIGR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEVENTCTL0R, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEVENTCTL1R, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCEXTINSELR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCQCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(8), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(9), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(10), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(11), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(12), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(13), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(14), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(15), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(16), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(17), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(18), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(19), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(20), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(21), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(22), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(23), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(24), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(25), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(26), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(27), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(28), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(29), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(30), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSCTLR(31), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCRSR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSEQEVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSEQEVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSEQEVR(2), HDFGRTR, TRC, 1), + 
SR_FGT(SYS_TRCSEQRSTEVR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSCCR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSSPCICR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSTALLCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCSYNCPR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCTRACEIDR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCTSCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVIIECTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVIPCSSCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVISSCTLR, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCCTLR0, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCCTLR1, HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(0), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(1), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(2), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(3), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(4), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(5), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(6), HDFGRTR, TRC, 1), + SR_FGT(SYS_TRCVMIDCVR(7), HDFGRTR, TRC, 1), + SR_FGT(SYS_PMSLATFR_EL1, HDFGRTR, PMSLATFR_EL1, 1), + SR_FGT(SYS_PMSIRR_EL1, HDFGRTR, PMSIRR_EL1, 1), + SR_FGT(SYS_PMSIDR_EL1, HDFGRTR, PMSIDR_EL1, 1), + SR_FGT(SYS_PMSICR_EL1, HDFGRTR, PMSICR_EL1, 1), + SR_FGT(SYS_PMSFCR_EL1, HDFGRTR, PMSFCR_EL1, 1), + SR_FGT(SYS_PMSEVFR_EL1, HDFGRTR, PMSEVFR_EL1, 1), + SR_FGT(SYS_PMSCR_EL1, HDFGRTR, PMSCR_EL1, 1), + SR_FGT(SYS_PMBSR_EL1, HDFGRTR, PMBSR_EL1, 1), + SR_FGT(SYS_PMBPTR_EL1, HDFGRTR, PMBPTR_EL1, 1), + SR_FGT(SYS_PMBLIMITR_EL1, HDFGRTR, PMBLIMITR_EL1, 1), + SR_FGT(SYS_PMMIR_EL1, HDFGRTR, PMMIR_EL1, 1), + SR_FGT(SYS_PMSELR_EL0, HDFGRTR, PMSELR_EL0, 1), + SR_FGT(SYS_PMOVSCLR_EL0, HDFGRTR, PMOVS, 1), + SR_FGT(SYS_PMOVSSET_EL0, HDFGRTR, PMOVS, 1), + SR_FGT(SYS_PMINTENCLR_EL1, HDFGRTR, PMINTEN, 1), + SR_FGT(SYS_PMINTENSET_EL1, HDFGRTR, PMINTEN, 1), + SR_FGT(SYS_PMCNTENCLR_EL0, HDFGRTR, PMCNTEN, 1), + SR_FGT(SYS_PMCNTENSET_EL0, HDFGRTR, PMCNTEN, 1), + SR_FGT(SYS_PMCCNTR_EL0, HDFGRTR, PMCCNTR_EL0, 1), + SR_FGT(SYS_PMCCFILTR_EL0, HDFGRTR, PMCCFILTR_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(0), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(1), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(2), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(3), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(4), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(5), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(6), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(7), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(8), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(9), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(10), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(11), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(12), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(13), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(14), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(15), HDFGRTR, PMEVTYPERn_EL0, 1), + 
SR_FGT(SYS_PMEVTYPERn_EL0(16), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(17), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(18), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(19), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(20), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(21), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(22), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(23), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(24), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(25), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(26), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(27), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(28), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(29), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVTYPERn_EL0(30), HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(0), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(1), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(2), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(3), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(4), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(5), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(6), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(7), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(8), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(9), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(10), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(11), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(12), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(13), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(14), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(15), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(16), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(17), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(18), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(19), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(20), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(21), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(22), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(23), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(24), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(25), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(26), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(27), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(28), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(29), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_PMEVCNTRn_EL0(30), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT(SYS_OSDLR_EL1, HDFGRTR, OSDLR_EL1, 1), + SR_FGT(SYS_OSECCR_EL1, HDFGRTR, OSECCR_EL1, 1), + SR_FGT(SYS_OSLSR_EL1, HDFGRTR, OSLSR_EL1, 1), + SR_FGT(SYS_DBGPRCR_EL1, HDFGRTR, DBGPRCR_EL1, 1), + SR_FGT(SYS_DBGAUTHSTATUS_EL1, HDFGRTR, DBGAUTHSTATUS_EL1, 1), + SR_FGT(SYS_DBGCLAIMSET_EL1, HDFGRTR, DBGCLAIM, 1), + SR_FGT(SYS_DBGCLAIMCLR_EL1, HDFGRTR, DBGCLAIM, 1), + SR_FGT(SYS_MDSCR_EL1, HDFGRTR, MDSCR_EL1, 1), + /* + * The trap bits capture *64* debug registers per bit, but the + * ARM ARM only describes the encoding for the first 16, and + * we don't really support more than that anyway. 
+ */ + SR_FGT(SYS_DBGWVRn_EL1(0), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(1), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(2), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(3), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(4), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(5), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(6), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(7), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(8), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(9), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(10), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(11), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(12), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(13), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(14), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWVRn_EL1(15), HDFGRTR, DBGWVRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(0), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(1), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(2), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(3), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(4), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(5), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(6), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(7), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(8), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(9), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(10), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(11), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(12), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(13), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(14), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGWCRn_EL1(15), HDFGRTR, DBGWCRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(0), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(1), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(2), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(3), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(4), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(5), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(6), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(7), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(8), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(9), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(10), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(11), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(12), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(13), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(14), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBVRn_EL1(15), HDFGRTR, DBGBVRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(0), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(1), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(2), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(3), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(4), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(5), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(6), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(7), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(8), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(9), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(10), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(11), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(12), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(13), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(14), HDFGRTR, DBGBCRn_EL1, 1), + SR_FGT(SYS_DBGBCRn_EL1(15), HDFGRTR, DBGBCRn_EL1, 1), + /* 
+ * HDFGWTR_EL2 + * + * Although HDFGRTR_EL2 and HDFGWTR_EL2 registers largely + * overlap in their bit assignment, there are a number of bits + * that are RES0 on one side, and an actual trap bit on the + * other. The policy chosen here is to describe all the + * read-side mappings, and only the write-side mappings that + * differ from the read side, and the trap handler will pick + * the correct shadow register based on the access type. + */ + SR_FGT(SYS_TRFCR_EL1, HDFGWTR, TRFCR_EL1, 1), + SR_FGT(SYS_TRCOSLAR, HDFGWTR, TRCOSLAR, 1), + SR_FGT(SYS_PMCR_EL0, HDFGWTR, PMCR_EL0, 1), + SR_FGT(SYS_PMSWINC_EL0, HDFGWTR, PMSWINC_EL0, 1), + SR_FGT(SYS_OSLAR_EL1, HDFGWTR, OSLAR_EL1, 1), }; static union trap_config get_trap_config(u32 sysreg) @@ -1336,6 +1802,14 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) val = sanitised_sys_reg(vcpu, HFGWTR_EL2); break; + case HDFGRTR_GROUP: + case HDFGWTR_GROUP: + if (is_read) + val = sanitised_sys_reg(vcpu, HDFGRTR_EL2); + else + val = sanitised_sys_reg(vcpu, HDFGWTR_EL2); + break; + case HFGITR_GROUP: val = sanitised_sys_reg(vcpu, HFGITR_EL2); break; -- cgit From a77b31dce4375be15014b10e8f94a149592ea6b6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:57 +0100 Subject: KVM: arm64: nv: Add SVC trap forwarding HFGITR_EL2 allows the trap of SVC instructions to EL2. Allow these traps to be forwarded. Take this opportunity to deny any 32bit activity when NV is enabled. Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-24-maz@kernel.org --- arch/arm64/kvm/arm.c | 4 ++++ arch/arm64/kvm/handle_exit.c | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 72dc53a75d1c..8b51570a76f8 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -818,6 +819,9 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu) if (likely(!vcpu_mode_is_32bit(vcpu))) return false; + if (vcpu_has_nv(vcpu)) + return true; + return !kvm_supports_32bit_el0(); } diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 6dcd6604b6bc..3b86d534b995 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -226,6 +226,17 @@ static int kvm_handle_eret(struct kvm_vcpu *vcpu) return 1; } +static int handle_svc(struct kvm_vcpu *vcpu) +{ + /* + * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a + * 32bit guest would be caught by vcpu_mode_is_bad_32bit(), so + * we should only have to deal with a 64bit exception. + */ + kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu)); + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec, [ESR_ELx_EC_WFx] = kvm_handle_wfx, @@ -239,6 +250,7 @@ static exit_handle_fn arm_exit_handlers[] = { [ESR_ELx_EC_SMC32] = handle_smc, [ESR_ELx_EC_HVC64] = handle_hvc, [ESR_ELx_EC_SMC64] = handle_smc, + [ESR_ELx_EC_SVC64] = handle_svc, [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg, [ESR_ELx_EC_SVE] = handle_sve, [ESR_ELx_EC_ERET] = kvm_handle_eret, -- cgit From ea3b27d8dea081f1693b310322ae71fa75d1875b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:58 +0100 Subject: KVM: arm64: nv: Expand ERET trap forwarding to handle FGT We already handle ERET being trapped from an L1 guest in hyp context. However, with FGT, we can also have ERET being trapped from L2, and this needs to be reinjected into L1.
Add the required exception routing. Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-25-maz@kernel.org --- arch/arm64/kvm/handle_exit.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 3b86d534b995..617ae6dea5d5 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -222,7 +222,22 @@ static int kvm_handle_eret(struct kvm_vcpu *vcpu) if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET) return kvm_handle_ptrauth(vcpu); - kvm_emulate_nested_eret(vcpu); + /* + * If we got here, two possibilities: + * + * - the guest is in EL2, and we need to fully emulate ERET + * + * - the guest is in EL1, and we need to reinject the + * exception into the L1 hypervisor. + * + * If KVM ever traps ERET for its own use, we'll have to + * revisit this. + */ + if (is_hyp_ctxt(vcpu)) + kvm_emulate_nested_eret(vcpu); + else + kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu)); + return 1; } -- cgit From d4d2dacc7cddc37aaa7c6eed8665d533d1037e1e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:38:59 +0100 Subject: KVM: arm64: nv: Add switching support for HFGxTR/HDFGxTR Now that we can evaluate the FGT registers, allow them to be merged with the hypervisor's own configuration (in the case of HFG{RW}TR_EL2) or simply set for HFGITR_EL2, HDFGRTR_EL2 and HDFGWTR_EL2. Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-26-maz@kernel.org --- arch/arm64/kvm/hyp/include/hyp/switch.h | 48 +++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index e096b16e85fd..a4750070563f 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -70,6 +70,13 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) } } +#define compute_clr_set(vcpu, reg, clr, set) \ + do { \ + u64 hfg; \ + hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \ + set |= hfg & __ ## reg ## _MASK; \ + clr |= ~hfg & __ ## reg ## _nMASK; \ + } while(0) static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) @@ -97,6 +104,10 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) w_set |= HFGxTR_EL2_TCR_EL1_MASK; + if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { + compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set); + compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set); + } /* The default is not to trap anything but ACCDATA_EL1 */ r_val = __HFGRTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1; @@ -109,6 +120,38 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) write_sysreg_s(r_val, SYS_HFGRTR_EL2); write_sysreg_s(w_val, SYS_HFGWTR_EL2); + + if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + return; + + ctxt_sys_reg(hctxt, HFGITR_EL2) = read_sysreg_s(SYS_HFGITR_EL2); + + r_set = r_clr = 0; + compute_clr_set(vcpu, HFGITR_EL2, r_clr, r_set); + r_val = __HFGITR_EL2_nMASK; + r_val |= r_set; + r_val &= ~r_clr; + + write_sysreg_s(r_val, SYS_HFGITR_EL2); + + ctxt_sys_reg(hctxt, HDFGRTR_EL2) = read_sysreg_s(SYS_HDFGRTR_EL2); + ctxt_sys_reg(hctxt, HDFGWTR_EL2) = read_sysreg_s(SYS_HDFGWTR_EL2); + + r_clr = r_set = w_clr = w_set = 0; + + compute_clr_set(vcpu, HDFGRTR_EL2, r_clr, r_set); + compute_clr_set(vcpu, HDFGWTR_EL2, w_clr, w_set); + + r_val =
__HDFGRTR_EL2_nMASK; + r_val |= r_set; + r_val &= ~r_clr; + + w_val = __HDFGWTR_EL2_nMASK; + w_val |= w_set; + w_val &= ~w_clr; + + write_sysreg_s(r_val, SYS_HDFGRTR_EL2); + write_sysreg_s(w_val, SYS_HDFGWTR_EL2); } static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) @@ -121,7 +164,12 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2); write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2); + if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + return; + write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2); + write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2); + write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2); } static inline void __activate_traps_common(struct kvm_vcpu *vcpu) -- cgit From 0a5d28433ad94cc38ecb3dbb5138b8ae30ffb98a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:39:00 +0100 Subject: KVM: arm64: nv: Expose FGT to nested guests Now that we have FGT support, expose the feature to NV guests. Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-27-maz@kernel.org --- arch/arm64/kvm/nested.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 7f80f385d9e8..3facd8918ae3 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -71,8 +71,9 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p, break; case SYS_ID_AA64MMFR0_EL1: - /* Hide ECV, FGT, ExS, Secure Memory */ - val &= ~(GENMASK_ULL(63, 43) | + /* Hide ECV, ExS, Secure Memory */ + val &= ~(NV_FTR(MMFR0, ECV) | + NV_FTR(MMFR0, EXS) | NV_FTR(MMFR0, TGRAN4_2) | NV_FTR(MMFR0, TGRAN16_2) | NV_FTR(MMFR0, TGRAN64_2) | -- cgit From a63cf31139b7f41d468dc8ef63dbf6bae213d960 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:39:01 +0100 Subject: KVM: arm64: Move HCRX_EL2 switch to load/put on VHE systems Although the nVHE behaviour requires HCRX_EL2 to be switched on each switch between host and guest, there is nothing in this register that would affect a VHE host. It is thus possible to save/restore this register on load/put on VHE systems, avoiding unnecessary sysreg access on the hot path. Additionally, it avoids unnecessary traps when running with NV. To achieve this, simply move the read/writes to the *_common() helpers, which are called on load/put on VHE, and more eagerly on nVHE. 
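The guest's contribution to these register values is assembled by the compute_clr_set() merge added two patches earlier. As a point of reference, here is a minimal stand-alone sketch of that merge; the mask values are illustrative placeholders rather than the kernel's real layouts (positive-polarity MASK bits trap when 1, negative-polarity nMASK bits trap when 0, RES0 bits are discarded):

#include <stdint.h>

/* Hypothetical bit layout for one FGT register (illustrative only) */
#define REG_RES0	0xf000000000000000ULL	/* reserved, ignored on merge   */
#define REG_MASK	0x00000000ffffffffULL	/* positive polarity: 1 = trap  */
#define REG_nMASK	0x0fffffff00000000ULL	/* negative polarity: 0 = trap  */

static uint64_t merge_fgt(uint64_t host_val, uint64_t guest_val)
{
	uint64_t set = 0, clr = 0;

	guest_val &= ~REG_RES0;		/* sanitise the guest's shadow value */
	set |= guest_val & REG_MASK;	/* positive traps the guest enabled  */
	clr |= ~guest_val & REG_nMASK;	/* cleared nMASK bit == trap enabled */

	/* layer the guest's traps on top of the host's configuration */
	return (host_val | set) & ~clr;
}

Because set can only add MASK bits and clr can only clear nMASK bits, traps strictly accumulate: the L1 guest can enable extra traps but can never mask out one the host relies on.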
Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-28-maz@kernel.org --- arch/arm64/kvm/hyp/include/hyp/switch.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index a4750070563f..060c5a0409e5 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -197,6 +197,9 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + if (cpus_have_final_cap(ARM64_HAS_HCX)) + write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2); + __activate_traps_hfgxtr(vcpu); } @@ -213,6 +216,9 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU); } + if (cpus_have_final_cap(ARM64_HAS_HCX)) + write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); + __deactivate_traps_hfgxtr(vcpu); } @@ -227,9 +233,6 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu) if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); - - if (cpus_have_final_cap(ARM64_HAS_HCX)) - write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2); } static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) @@ -244,9 +247,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 &= ~HCR_VSE; vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE; } - - if (cpus_have_final_cap(ARM64_HAS_HCX)) - write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); } static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) -- cgit From 03fb54d0aa73cc14e51f6611eb3289e4fec15184 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Aug 2023 19:39:02 +0100 Subject: KVM: arm64: nv: Add support for HCRX_EL2 HCRX_EL2 has an interesting effect on HFGITR_EL2, as it conditions the traps of TLBI*nXS. Expand the FGT support to add a new Fine Grained Filter that will get checked when the instruction gets trapped, allowing the shadow register to override the trap as needed. 
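The effect of the new filter can be summarised with a short sketch; the helper is illustrative, and the FGTnXS bit position (HCRX_EL2[4]) is taken from the architecture rather than from this patch:

#include <stdbool.h>
#include <stdint.h>

#define HCRX_EL2_FGTnXS	(UINT64_C(1) << 4)	/* HCRX_EL2.FGTnXS */

/*
 * Fine-grained decision for a trapped TLBI*nXS instruction: the
 * HFGITR_EL2 trap bit only applies while HCRX_EL2.FGTnXS is clear;
 * when the filter bit is set, the fine-grained trap is suppressed.
 */
static bool fgt_traps_nxs_tlbi(uint64_t hfgitr, uint64_t hcrx,
			       uint64_t trap_bit)
{
	if (hcrx & HCRX_EL2_FGTnXS)
		return false;		/* filter overrides the FGT bit */

	return hfgitr & trap_bit;	/* positive polarity: 1 == trap */
}

This only models the fine-grained part of the decision; coarse-grained traps are still evaluated separately, which is exactly why the trap handler downgrades tc.fgt to __NO_FGT_GROUP__ rather than bailing out.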
Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Reviewed-by: Jing Zhang Link: https://lore.kernel.org/r/20230815183903.2735724-29-maz@kernel.org --- arch/arm64/include/asm/kvm_arm.h | 5 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/emulate-nested.c | 94 +++++++++++++++++++++------------ arch/arm64/kvm/hyp/include/hyp/switch.h | 15 +++++- arch/arm64/kvm/nested.c | 3 +- arch/arm64/kvm/sys_regs.c | 2 + 6 files changed, 83 insertions(+), 37 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index d229f238c3b6..137f732789c9 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -369,6 +369,11 @@ #define __HDFGWTR_EL2_MASK ~__HDFGWTR_EL2_nMASK #define __HDFGWTR_EL2_nMASK GENMASK(62, 60) +/* Similar definitions for HCRX_EL2 */ +#define __HCRX_EL2_RES0 (GENMASK(63, 16) | GENMASK(13, 12)) +#define __HCRX_EL2_MASK (0) +#define __HCRX_EL2_nMASK (GENMASK(15, 14) | GENMASK(4, 0)) + /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) /* diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cb1c5c54cedd..93c541111dea 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -380,6 +380,7 @@ enum vcpu_sysreg { CPTR_EL2, /* Architectural Feature Trap Register (EL2) */ HSTR_EL2, /* Hypervisor System Trap Register */ HACR_EL2, /* Hypervisor Auxiliary Control Register */ + HCRX_EL2, /* Extended Hypervisor Configuration Register */ TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */ TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */ TCR_EL2, /* Translation Control Register (EL2) */ diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index c9662f9a345e..1cc606c16416 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -426,11 +426,13 @@ static const complex_condition_check ccc[] = { * [13:10] enum fgt_group_id (4 bits) * [19:14] bit number in the FGT register (6 bits) * [20] trap polarity (1 bit) - * [62:21] Unused (42 bits) + * [25:21] FG filter (5 bits) + * [62:26] Unused (37 bits) * [63] RES0 - Must be zero, as lost on insertion in the xarray */ #define TC_CGT_BITS 10 #define TC_FGT_BITS 4 +#define TC_FGF_BITS 5 union trap_config { u64 val; @@ -439,7 +441,8 @@ union trap_config { unsigned long fgt:TC_FGT_BITS; /* Fine Grained Trap id */ unsigned long bit:6; /* Bit number */ unsigned long pol:1; /* Polarity */ - unsigned long unused:42; /* Unused, should be zero */ + unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */ + unsigned long unused:37; /* Unused, should be zero */ unsigned long mbz:1; /* Must Be Zero */ }; }; @@ -947,7 +950,15 @@ enum fgt_group_id { __NR_FGT_GROUP_IDS__ }; -#define SR_FGT(sr, g, b, p) \ +enum fg_filter_id { + __NO_FGF__, + HCRX_FGTnXS, + + /* Must be last */ + __NR_FG_FILTER_IDS__ +}; + +#define SR_FGF(sr, g, b, p, f) \ { \ .encoding = sr, \ .end = sr, \ @@ -955,10 +966,13 @@ enum fgt_group_id { .fgt = g ## _GROUP, \ .bit = g ## _EL2_ ## b ## _SHIFT, \ .pol = p, \ + .fgf = f, \ }, \ .line = __LINE__, \ } +#define SR_FGT(sr, g, b, p) SR_FGF(sr, g, b, p, __NO_FGF__) + static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { /* HFGRTR_EL2, HFGWTR_EL2 */ SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0), @@ -1062,37 +1076,37 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { SR_FGT(OP_TLBI_ASIDE1OS, HFGITR, TLBIASIDE1OS, 1), SR_FGT(OP_TLBI_VAE1OS, HFGITR, TLBIVAE1OS, 1), 
SR_FGT(OP_TLBI_VMALLE1OS, HFGITR, TLBIVMALLE1OS, 1), - /* FIXME: nXS variants must be checked against HCRX_EL2.FGTnXS */ - SR_FGT(OP_TLBI_VAALE1NXS, HFGITR, TLBIVAALE1, 1), - SR_FGT(OP_TLBI_VALE1NXS, HFGITR, TLBIVALE1, 1), - SR_FGT(OP_TLBI_VAAE1NXS, HFGITR, TLBIVAAE1, 1), - SR_FGT(OP_TLBI_ASIDE1NXS, HFGITR, TLBIASIDE1, 1), - SR_FGT(OP_TLBI_VAE1NXS, HFGITR, TLBIVAE1, 1), - SR_FGT(OP_TLBI_VMALLE1NXS, HFGITR, TLBIVMALLE1, 1), - SR_FGT(OP_TLBI_RVAALE1NXS, HFGITR, TLBIRVAALE1, 1), - SR_FGT(OP_TLBI_RVALE1NXS, HFGITR, TLBIRVALE1, 1), - SR_FGT(OP_TLBI_RVAAE1NXS, HFGITR, TLBIRVAAE1, 1), - SR_FGT(OP_TLBI_RVAE1NXS, HFGITR, TLBIRVAE1, 1), - SR_FGT(OP_TLBI_RVAALE1ISNXS, HFGITR, TLBIRVAALE1IS, 1), - SR_FGT(OP_TLBI_RVALE1ISNXS, HFGITR, TLBIRVALE1IS, 1), - SR_FGT(OP_TLBI_RVAAE1ISNXS, HFGITR, TLBIRVAAE1IS, 1), - SR_FGT(OP_TLBI_RVAE1ISNXS, HFGITR, TLBIRVAE1IS, 1), - SR_FGT(OP_TLBI_VAALE1ISNXS, HFGITR, TLBIVAALE1IS, 1), - SR_FGT(OP_TLBI_VALE1ISNXS, HFGITR, TLBIVALE1IS, 1), - SR_FGT(OP_TLBI_VAAE1ISNXS, HFGITR, TLBIVAAE1IS, 1), - SR_FGT(OP_TLBI_ASIDE1ISNXS, HFGITR, TLBIASIDE1IS, 1), - SR_FGT(OP_TLBI_VAE1ISNXS, HFGITR, TLBIVAE1IS, 1), - SR_FGT(OP_TLBI_VMALLE1ISNXS, HFGITR, TLBIVMALLE1IS, 1), - SR_FGT(OP_TLBI_RVAALE1OSNXS, HFGITR, TLBIRVAALE1OS, 1), - SR_FGT(OP_TLBI_RVALE1OSNXS, HFGITR, TLBIRVALE1OS, 1), - SR_FGT(OP_TLBI_RVAAE1OSNXS, HFGITR, TLBIRVAAE1OS, 1), - SR_FGT(OP_TLBI_RVAE1OSNXS, HFGITR, TLBIRVAE1OS, 1), - SR_FGT(OP_TLBI_VAALE1OSNXS, HFGITR, TLBIVAALE1OS, 1), - SR_FGT(OP_TLBI_VALE1OSNXS, HFGITR, TLBIVALE1OS, 1), - SR_FGT(OP_TLBI_VAAE1OSNXS, HFGITR, TLBIVAAE1OS, 1), - SR_FGT(OP_TLBI_ASIDE1OSNXS, HFGITR, TLBIASIDE1OS, 1), - SR_FGT(OP_TLBI_VAE1OSNXS, HFGITR, TLBIVAE1OS, 1), - SR_FGT(OP_TLBI_VMALLE1OSNXS, HFGITR, TLBIVMALLE1OS, 1), + /* nXS variants must be checked against HCRX_EL2.FGTnXS */ + SR_FGF(OP_TLBI_VAALE1NXS, HFGITR, TLBIVAALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VALE1NXS, HFGITR, TLBIVALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAAE1NXS, HFGITR, TLBIVAAE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_ASIDE1NXS, HFGITR, TLBIASIDE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAE1NXS, HFGITR, TLBIVAE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VMALLE1NXS, HFGITR, TLBIVMALLE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAALE1NXS, HFGITR, TLBIRVAALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVALE1NXS, HFGITR, TLBIRVALE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAAE1NXS, HFGITR, TLBIRVAAE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAE1NXS, HFGITR, TLBIRVAE1, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAALE1ISNXS, HFGITR, TLBIRVAALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVALE1ISNXS, HFGITR, TLBIRVALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAAE1ISNXS, HFGITR, TLBIRVAAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAE1ISNXS, HFGITR, TLBIRVAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAALE1ISNXS, HFGITR, TLBIVAALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VALE1ISNXS, HFGITR, TLBIVALE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAAE1ISNXS, HFGITR, TLBIVAAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_ASIDE1ISNXS, HFGITR, TLBIASIDE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAE1ISNXS, HFGITR, TLBIVAE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VMALLE1ISNXS, HFGITR, TLBIVMALLE1IS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAALE1OSNXS, HFGITR, TLBIRVAALE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVALE1OSNXS, HFGITR, TLBIRVALE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAAE1OSNXS, HFGITR, TLBIRVAAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_RVAE1OSNXS, HFGITR, TLBIRVAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAALE1OSNXS, HFGITR, TLBIVAALE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VALE1OSNXS, HFGITR, TLBIVALE1OS, 1, HCRX_FGTnXS), + 
SR_FGF(OP_TLBI_VAAE1OSNXS, HFGITR, TLBIVAAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_ASIDE1OSNXS, HFGITR, TLBIASIDE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VAE1OSNXS, HFGITR, TLBIVAE1OS, 1, HCRX_FGTnXS), + SR_FGF(OP_TLBI_VMALLE1OSNXS, HFGITR, TLBIVMALLE1OS, 1, HCRX_FGTnXS), SR_FGT(OP_AT_S1E1WP, HFGITR, ATS1E1WP, 1), SR_FGT(OP_AT_S1E1RP, HFGITR, ATS1E1RP, 1), SR_FGT(OP_AT_S1E0W, HFGITR, ATS1E0W, 1), @@ -1622,6 +1636,7 @@ int __init populate_nv_trap_config(void) BUILD_BUG_ON(sizeof(union trap_config) != sizeof(void *)); BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS)); BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS)); + BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS)); for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) { const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i]; @@ -1812,6 +1827,17 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) case HFGITR_GROUP: val = sanitised_sys_reg(vcpu, HFGITR_EL2); + switch (tc.fgf) { + u64 tmp; + + case __NO_FGF__: + break; + + case HCRX_FGTnXS: + tmp = sanitised_sys_reg(vcpu, HCRX_EL2); + if (tmp & HCRX_EL2_FGTnXS) + tc.fgt = __NO_FGT_GROUP__; + } break; case __NR_FGT_GROUP_IDS__: diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 060c5a0409e5..3acf6d77e324 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -197,8 +197,19 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); - if (cpus_have_final_cap(ARM64_HAS_HCX)) - write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2); + if (cpus_have_final_cap(ARM64_HAS_HCX)) { + u64 hcrx = HCRX_GUEST_FLAGS; + if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { + u64 clr = 0, set = 0; + + compute_clr_set(vcpu, HCRX_EL2, clr, set); + + hcrx |= set; + hcrx &= ~clr; + } + + write_sysreg_s(hcrx, SYS_HCRX_EL2); + } __activate_traps_hfgxtr(vcpu); } diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 3facd8918ae3..042695a210ce 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -117,7 +117,8 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p, break; case SYS_ID_AA64MMFR1_EL1: - val &= (NV_FTR(MMFR1, PAN) | + val &= (NV_FTR(MMFR1, HCX) | + NV_FTR(MMFR1, PAN) | NV_FTR(MMFR1, LO) | NV_FTR(MMFR1, HPDS) | NV_FTR(MMFR1, VH) | diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 9556896311db..e92ec810d449 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2372,6 +2372,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(HFGITR_EL2, access_rw, reset_val, 0), EL2_REG(HACR_EL2, access_rw, reset_val, 0), + EL2_REG(HCRX_EL2, access_rw, reset_val, 0), + EL2_REG(TTBR0_EL2, access_rw, reset_val, 0), EL2_REG(TTBR1_EL2, access_rw, reset_val, 0), EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1), -- cgit From 60046980bf60ed8846113b484d666a5fdf413be4 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 16 Aug 2023 14:09:49 -0700 Subject: KVM: arm64: nv: Select XARRAY_MULTI to fix build error populate_nv_trap_config() uses xa_store_range(), which is only built when XARRAY_MULTI is set, so select that symbol to prevent the build error. 
aarch64-linux-ld: arch/arm64/kvm/emulate-nested.o: in function `populate_nv_trap_config': emulate-nested.c:(.init.text+0x17c): undefined reference to `xa_store_range' Fixes: e58ec47bf68d ("KVM: arm64: nv: Add trap forwarding infrastructure") Signed-off-by: Randy Dunlap Cc: Marc Zyngier Cc: Oliver Upton Cc: kvmarm@lists.linux.dev Cc: Catalin Marinas Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230816210949.17117-1-rdunlap@infradead.org --- arch/arm64/kvm/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index f531da6b362e..f4f0b19d755b 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -43,6 +43,7 @@ menuconfig KVM select SCHED_INFO select GUEST_PERF_EVENTS if PERF_EVENTS select INTERVAL_TREE + select XARRAY_MULTI help Support hosting virtualized guest machines. -- cgit From 3e1efe2b67d3d38116ec010968dbcd89d29e4561 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:41:44 -0700 Subject: KVM: Wrap kvm_{gfn,hva}_range.pte in a per-action union Wrap kvm_{gfn,hva}_range.pte in a union so that future notifier events can pass event specific information up and down the stack without needing to constantly expand and churn the APIs. Lockless aging of SPTEs will pass around a bitmap, and support for memory attributes will pass around the new attributes for the range. Add a "KVM_NO_ARG" placeholder to simplify handling events without an argument (creating a dummy union variable is mildly annoying). Opportunistically drop explicit zero-initialization of the "pte" field, as omitting the field (now a union) has the same effect. Cc: Yu Zhao Link: https://lore.kernel.org/all/CAOUHufagkd2Jk3_HrVoFFptRXM=hX2CV8f+M-dka-hJU4bP8kw@mail.gmail.com Reviewed-by: Oliver Upton Acked-by: Yu Zhao Link: https://lore.kernel.org/r/20230729004144.1054885-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/arm64/kvm/mmu.c | 2 +- arch/mips/kvm/mmu.c | 2 +- arch/riscv/kvm/mmu.c | 2 +- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/tdp_mmu.c | 6 +++--- include/linux/kvm_host.h | 6 +++++- virt/kvm/kvm_main.c | 19 ++++++++++--------- 7 files changed, 22 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 6db9ef288ec3..55f03a68f1cd 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1721,7 +1721,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - kvm_pfn_t pfn = pte_pfn(range->pte); + kvm_pfn_t pfn = pte_pfn(range->arg.pte); if (!kvm->arch.mmu.pgt) return false; diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index e8c08988ed37..7b2ac1319d70 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -447,7 +447,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { gpa_t gpa = range->start << PAGE_SHIFT; - pte_t hva_pte = range->pte; + pte_t hva_pte = range->arg.pte; pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); pte_t old_pte; diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index f2eb47925806..857f4312b0f8 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -559,7 +559,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { int ret; - kvm_pfn_t pfn = pte_pfn(range->pte); + kvm_pfn_t pfn
= pte_pfn(range->arg.pte); if (!kvm->arch.pgd) return false; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ec169f5c7dce..d72f2b20f430 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1588,7 +1588,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, range->start, range->end - 1, &iterator) ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, - iterator.level, range->pte); + iterator.level, range->arg.pte); return ret; } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 512163d52194..6250bd3d20c1 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1241,7 +1241,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte; /* Huge pages aren't expected to be modified without first being zapped. */ - WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end); + WARN_ON(pte_huge(range->arg.pte) || range->start + 1 != range->end); if (iter->level != PG_LEVEL_4K || !is_shadow_present_pte(iter->old_spte)) @@ -1255,9 +1255,9 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, */ tdp_mmu_iter_set_spte(kvm, iter, 0); - if (!pte_write(range->pte)) { + if (!pte_write(range->arg.pte)) { new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, - pte_pfn(range->pte)); + pte_pfn(range->arg.pte)); tdp_mmu_iter_set_spte(kvm, iter, new_spte); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9d3ac7720da9..9125d0ab642d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -256,11 +256,15 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif #ifdef KVM_ARCH_WANT_MMU_NOTIFIER +union kvm_mmu_notifier_arg { + pte_t pte; +}; + struct kvm_gfn_range { struct kvm_memory_slot *slot; gfn_t start; gfn_t end; - pte_t pte; + union kvm_mmu_notifier_arg arg; bool may_block; }; bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index dfbaafbe3a00..92c50dc159e8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -526,7 +526,7 @@ typedef void (*on_unlock_fn_t)(struct kvm *kvm); struct kvm_hva_range { unsigned long start; unsigned long end; - pte_t pte; + union kvm_mmu_notifier_arg arg; hva_handler_t handler; on_lock_fn_t on_lock; on_unlock_fn_t on_unlock; @@ -547,6 +547,8 @@ static void kvm_null_fn(void) } #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) +static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG; + /* Iterate over each memslot intersecting [start, last] (inclusive) range */ #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ @@ -591,7 +593,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, * bother making these conditional (to avoid writes on * the second or later invocation of the handler). 
*/ - gfn_range.pte = range->pte; + gfn_range.arg = range->arg; gfn_range.may_block = range->may_block; /* @@ -632,14 +634,14 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn, unsigned long start, unsigned long end, - pte_t pte, + union kvm_mmu_notifier_arg arg, hva_handler_t handler) { struct kvm *kvm = mmu_notifier_to_kvm(mn); const struct kvm_hva_range range = { .start = start, .end = end, - .pte = pte, + .arg = arg, .handler = handler, .on_lock = (void *)kvm_null_fn, .on_unlock = (void *)kvm_null_fn, @@ -659,7 +661,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn const struct kvm_hva_range range = { .start = start, .end = end, - .pte = __pte(0), .handler = handler, .on_lock = (void *)kvm_null_fn, .on_unlock = (void *)kvm_null_fn, @@ -693,6 +694,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, pte_t pte) { struct kvm *kvm = mmu_notifier_to_kvm(mn); + const union kvm_mmu_notifier_arg arg = { .pte = pte }; trace_kvm_set_spte_hva(address); @@ -708,7 +710,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) return; - kvm_handle_hva_range(mn, address, address + 1, pte, kvm_change_spte_gfn); + kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn); } void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, @@ -747,7 +749,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, const struct kvm_hva_range hva_range = { .start = range->start, .end = range->end, - .pte = __pte(0), .handler = kvm_unmap_gfn_range, .on_lock = kvm_mmu_invalidate_begin, .on_unlock = kvm_arch_guest_memory_reclaimed, @@ -812,7 +813,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, const struct kvm_hva_range hva_range = { .start = range->start, .end = range->end, - .pte = __pte(0), .handler = (void *)kvm_null_fn, .on_lock = kvm_mmu_invalidate_end, .on_unlock = (void *)kvm_null_fn, @@ -845,7 +845,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, { trace_kvm_age_hva(start, end); - return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); + return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG, + kvm_age_gfn); } static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, -- cgit From 765da7fe0e76ed41eea9514433f4ca8cdb5312b1 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 7 Aug 2023 17:42:43 +0800 Subject: KVM: x86: Remove break statements that will never be executed Fix compiler warnings when compiling KVM with [-Wunreachable-code-break]. No functional change intended. 
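A minimal reproduction of the pattern being removed (a generic example, not taken from the KVM sources):

static int classify(int x)
{
	switch (x) {
	case 0:
		return -1;
		break;	/* unreachable: -Wunreachable-code-break fires here */
	default:
		return 0;
	}
}

Since control can never reach a break placed directly after an unconditional return, deleting it silences the warning with no change in behaviour.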
Cc: Vitaly Kuznetsov Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230807094243.32516-1-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/emulate.c | 2 -- arch/x86/kvm/hyperv.c | 1 - arch/x86/kvm/x86.c | 1 - 3 files changed, 4 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 936a397a08cd..2673cd5c46cb 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1799,13 +1799,11 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) op->addr.mem, &op->val, op->bytes); - break; case OP_MEM_STR: return segmented_write(ctxt, op->addr.mem, op->data, op->bytes * op->count); - break; case OP_XMM: kvm_write_sse_reg(op->addr.xmm, &op->vec_val); break; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b28fd020066f..7c2dac6824e2 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1293,7 +1293,6 @@ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr) case HV_X64_MSR_VP_ASSIST_PAGE: return hv_vcpu->cpuid_cache.features_eax & HV_MSR_APIC_ACCESS_AVAILABLE; - break; case HV_X64_MSR_TSC_FREQUENCY: case HV_X64_MSR_APIC_FREQUENCY: return hv_vcpu->cpuid_cache.features_eax & diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 80ec33fc823b..5ff77a70b382 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4646,7 +4646,6 @@ static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) return 0; default: return -ENXIO; - break; } } -- cgit From 7b0151caf73a656b75b550e361648430233455a0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 8 Aug 2023 16:20:57 -0700 Subject: KVM: x86: Remove WARN sanity check on hypervisor timer vs. UNINITIALIZED vCPU Drop the WARN in KVM_RUN that asserts that KVM isn't using the hypervisor timer, a.k.a. the VMX preemption timer, for a vCPU that is in the UNINITIALIZED activity state. The intent of the WARN is to sanity check that KVM won't drop a timer interrupt due to an unexpected transition to UNINITIALIZED, but unfortunately userspace can use various ioctl()s to force the unexpected state. Drop the sanity check instead of switching from the hypervisor timer to a software-based timer, as the only reason to switch to a software timer when a vCPU is blocking is to ensure the timer interrupt wakes the vCPU, but said interrupt isn't a valid wake event for vCPUs in UNINITIALIZED state *and* the interrupt will be dropped in the end. Reported-by: Yikebaer Aizezi Closes: https://lore.kernel.org/all/CALcu4rbFrU4go8sBHk3FreP+qjgtZCGcYNpSiEXOLm==qFv7iQ@mail.gmail.com Reviewed-by: Paolo Bonzini Link: https://lore.kernel.org/r/20230808232057.2498287-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5ff77a70b382..d293adc05145 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11084,12 +11084,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) r = -EINTR; goto out; } + /* - * It should be impossible for the hypervisor timer to be in - * use before KVM has ever run the vCPU. + * Don't bother switching APIC timer emulation from the + * hypervisor timer to the software timer, the only way for the + * APIC timer to be active is if userspace stuffed vCPU state, + * i.e. put the vCPU into a nonsensical state.
Only an INIT + * will transition the vCPU out of UNINITIALIZED (without more + * state stuffing from userspace), which will reset the local + * APIC and thus cancel the timer or drop the IRQ (if the timer + * already expired). */ - WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu)); - kvm_vcpu_srcu_read_unlock(vcpu); kvm_vcpu_block(vcpu); kvm_vcpu_srcu_read_lock(vcpu); -- cgit 

From 4c08e737f056fec930b416a2bd37ed266d724f95 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 8 Aug 2023 16:31:31 -0700 Subject: KVM: SVM: Take and hold ir_list_lock when updating vCPU's Physical ID entry 

Hoist the acquisition of ir_list_lock from avic_update_iommu_vcpu_affinity() to its two callers, avic_vcpu_load() and avic_vcpu_put(), specifically to encapsulate the write to the vCPU's entry in the AVIC Physical ID table. This will allow a future fix to pull information from the Physical ID entry when updating the IRTE, without potentially consuming stale information, i.e. without racing with the vCPU being (un)loaded. 

Add a comment to call out that ir_list_lock does NOT protect against multiple writers, specifically that reading the Physical ID entry in avic_vcpu_put() outside of the lock is safe. 

To preserve some semblance of independence from ir_list_lock, keep the READ_ONCE() in avic_vcpu_load() even though acquiring the spinlock effectively ensures the load(s) will be generated after acquiring the lock. 

Cc: stable@vger.kernel.org Tested-by: Alejandro Jimenez Reviewed-by: Joao Martins Link: https://lore.kernel.org/r/20230808233132.2499764-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/avic.c | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) 

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index cfc8ab773025..8e041b215ddb 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -986,10 +986,11 @@ static inline int avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r) { int ret = 0; - unsigned long flags; struct amd_svm_iommu_ir *ir; struct vcpu_svm *svm = to_svm(vcpu); + lockdep_assert_held(&svm->ir_list_lock); + if (!kvm_arch_has_assigned_device(vcpu->kvm)) return 0; @@ -997,19 +998,15 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r) * Here, we go through the per-vcpu ir_list to update all existing * interrupt remapping table entry targeting this vcpu. 
*/ - spin_lock_irqsave(&svm->ir_list_lock, flags); - if (list_empty(&svm->ir_list)) - goto out; + return 0; list_for_each_entry(ir, &svm->ir_list, node) { ret = amd_iommu_update_ga(cpu, r, ir->data); if (ret) - break; + return ret; } -out: - spin_unlock_irqrestore(&svm->ir_list_lock, flags); - return ret; + return 0; } void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -1017,6 +1014,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) u64 entry; int h_physical_id = kvm_cpu_get_apicid(cpu); struct vcpu_svm *svm = to_svm(vcpu); + unsigned long flags; lockdep_assert_preemption_disabled(); @@ -1033,6 +1031,8 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (kvm_vcpu_is_blocking(vcpu)) return; + spin_lock_irqsave(&svm->ir_list_lock, flags); + entry = READ_ONCE(*(svm->avic_physical_id_cache)); WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK); @@ -1042,25 +1042,40 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) WRITE_ONCE(*(svm->avic_physical_id_cache), entry); avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true); + + spin_unlock_irqrestore(&svm->ir_list_lock, flags); } void avic_vcpu_put(struct kvm_vcpu *vcpu) { u64 entry; struct vcpu_svm *svm = to_svm(vcpu); + unsigned long flags; lockdep_assert_preemption_disabled(); + /* + * Note, reading the Physical ID entry outside of ir_list_lock is safe + * as only the pCPU that has loaded (or is loading) the vCPU is allowed + * to modify the entry, and preemption is disabled. I.e. the vCPU + * can't be scheduled out and thus avic_vcpu_{put,load}() can't run + * recursively. + */ entry = READ_ONCE(*(svm->avic_physical_id_cache)); /* Nothing to do if IsRunning == '0' due to vCPU blocking. */ if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)) return; + spin_lock_irqsave(&svm->ir_list_lock, flags); + avic_update_iommu_vcpu_affinity(vcpu, -1, 0); entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK; WRITE_ONCE(*(svm->avic_physical_id_cache), entry); + + spin_unlock_irqrestore(&svm->ir_list_lock, flags); + } void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu) -- cgit From f3cebc75e7425d6949d726bb8e937095b0aef025 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 8 Aug 2023 16:31:32 -0700 Subject: KVM: SVM: Set target pCPU during IRTE update if target vCPU is running Update the target pCPU for IOMMU doorbells when updating IRTE routing if KVM is actively running the associated vCPU. KVM currently only updates the pCPU when loading the vCPU (via avic_vcpu_load()), and so doorbell events will be delayed until the vCPU goes through a put+load cycle (which might very well "never" happen for the lifetime of the VM). To avoid inserting a stale pCPU, e.g. due to racing between updating IRTE routing and vCPU load/put, get the pCPU information from the vCPU's Physical APIC ID table entry (a.k.a. avic_physical_id_cache in KVM) and update the IRTE while holding ir_list_lock. Add comments with --verbose enabled to explain exactly what is and isn't protected by ir_list_lock. 
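A condensed sketch of the rule the patch establishes, using the names from the diff below: the Physical ID entry must be read, and the IRTE target updated, inside one ir_list_lock critical section.

/* Condensed sketch of the tail of svm_ir_list_add() after this patch;
 * the hunk below is the authoritative version. */
static void ir_list_add_tail_sketch(struct vcpu_svm *svm,
				    struct amd_svm_iommu_ir *ir)
{
	unsigned long flags;
	u64 entry;

	spin_lock_irqsave(&svm->ir_list_lock, flags);

	/* Read the entry inside the lock: a concurrent avic_vcpu_load() or
	 * avic_vcpu_put() either completed first (the entry is current) or
	 * waits until the IRTE update below is done. */
	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
		amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
				    true, ir->data);

	list_add(&ir->node, &svm->ir_list);
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}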
Fixes: 411b44ba80ab ("svm: Implements update_pi_irte hook to setup posted interrupt") Reported-by: dengqiao.joey Cc: stable@vger.kernel.org Cc: Alejandro Jimenez Cc: Joao Martins Cc: Maxim Levitsky Cc: Suravee Suthikulpanit Tested-by: Alejandro Jimenez Reviewed-by: Joao Martins Link: https://lore.kernel.org/r/20230808233132.2499764-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/avic.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 8e041b215ddb..2092db892d7d 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -791,6 +791,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) int ret = 0; unsigned long flags; struct amd_svm_iommu_ir *ir; + u64 entry; /** * In some cases, the existing irte is updated and re-set, @@ -824,6 +825,18 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) ir->data = pi->ir_data; spin_lock_irqsave(&svm->ir_list_lock, flags); + + /* + * Update the target pCPU for IOMMU doorbells if the vCPU is running. + * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM + * will update the pCPU info when the vCPU awkened and/or scheduled in. + * See also avic_vcpu_load(). + */ + entry = READ_ONCE(*(svm->avic_physical_id_cache)); + if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) + amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK, + true, pi->ir_data); + list_add(&ir->node, &svm->ir_list); spin_unlock_irqrestore(&svm->ir_list_lock, flags); out: @@ -1031,6 +1044,13 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (kvm_vcpu_is_blocking(vcpu)) return; + /* + * Grab the per-vCPU interrupt remapping lock even if the VM doesn't + * _currently_ have assigned devices, as that can change. Holding + * ir_list_lock ensures that either svm_ir_list_add() will consume + * up-to-date entry information, or that this task will wait until + * svm_ir_list_add() completes to set the new target pCPU. + */ spin_lock_irqsave(&svm->ir_list_lock, flags); entry = READ_ONCE(*(svm->avic_physical_id_cache)); @@ -1067,6 +1087,14 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu) if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)) return; + /* + * Take and hold the per-vCPU interrupt remapping lock while updating + * the Physical ID entry even though the lock doesn't protect against + * multiple writers (see above). Holding ir_list_lock ensures that + * either svm_ir_list_add() will consume up-to-date entry information, + * or that this task will wait until svm_ir_list_add() completes to + * mark the vCPU as not running. + */ spin_lock_irqsave(&svm->ir_list_lock, flags); avic_update_iommu_vcpu_affinity(vcpu, -1, 0); -- cgit From 7d18eef13622d84af93086ae79f081692516ba85 Mon Sep 17 00:00:00 2001 From: Shiyuan Gao Date: Thu, 10 Aug 2023 19:38:53 +0800 Subject: KVM: VMX: Rename vmx_get_max_tdp_level() to vmx_get_max_ept_level() In VMX, ept_level looks better than tdp_level and is consistent with SVM's get_npt_level(). 
Signed-off-by: Shiyuan Gao Link: https://lore.kernel.org/r/20230810113853.98114-1-gaoshiyuan@baidu.com [sean: massage changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 3f868826db7d..f848f43fa3e2 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3315,7 +3315,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) vmx->emulation_required = vmx_emulation_required(vcpu); } -static int vmx_get_max_tdp_level(void) +static int vmx_get_max_ept_level(void) { if (cpu_has_vmx_ept_5levels()) return 5; @@ -8498,7 +8498,7 @@ static __init int hardware_setup(void) */ vmx_setup_me_spte_mask(); - kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(), + kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(), ept_caps_to_lpage_level(vmx_capability.ept)); /* -- cgit 

From 392a53246257a9f89ef421b60af62056ce374285 Mon Sep 17 00:00:00 2001 From: Li zeming Date: Thu, 17 Aug 2023 08:26:31 +0800 Subject: x86: kvm: x86: Remove unnecessary initial values of variables 

bitmap and khz are assigned before they are read, so initializing them at declaration is unnecessary. 

Signed-off-by: Li zeming Link: https://lore.kernel.org/r/20230817002631.2885-1-zeming@nfschina.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d293adc05145..c91de9897e32 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6512,7 +6512,7 @@ static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, struct kvm_msr_filter_range *user_range) { - unsigned long *bitmap = NULL; + unsigned long *bitmap; size_t bitmap_size; if (!user_range->nmsrs) @@ -9146,7 +9146,7 @@ static int kvmclock_cpu_down_prep(unsigned int cpu) static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; - unsigned long khz = 0; + unsigned long khz; WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC)); -- cgit 

From f67063414c0e83bb4a9e12358cc179af53c2a8bb Mon Sep 17 00:00:00 2001 From: Manali Shukla Date: Mon, 17 Jul 2023 04:19:03 +0000 Subject: KVM: SVM: correct the size of spec_ctrl field in VMCB save area 

Correct the size of the spec_ctrl field in the VMCB save area based on the AMD Programmer's Manual. Originally, spec_ctrl was listed as a u32 followed by 4 bytes of reserved area. The AMD Programmer's Manual now lists spec_ctrl as 8 bytes in the VMCB save area. 

The Public Processor Programming Reference for Genoa shows SPEC_CTRL as a 64b register, but the AMD Programmer's Manual lists SPEC_CTRL as a 32b register. This discrepancy will be cleaned up in the next revision of the AMD Programmer's Manual. Since the remaining bits above bit 7 are reserved in the SPEC_CTRL MSR and thus not used, declaring spec_ctrl as a u32 in the VMCB save area is currently not an issue. 
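The EXPECTED_*_SIZE constants adjusted below exist so that a layout change like this fails at build time rather than silently corrupting the save area; a sketch of the style of compile-time check involved (the exact assertions live in svm.h and may differ in form):

/* Sketch, assuming BUILD_BUG_ON()-style size checks as used in svm.h. */
static inline void vmcb_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
}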
Fixes: 3dd2775b74c9 ("KVM: SVM: Create a separate mapping for the SEV-ES save area") Suggested-by: Tom Lendacky Signed-off-by: Manali Shukla Link: https://lore.kernel.org/r/20230717041903.85480-1-manali.shukla@amd.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/svm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 72ebd5e4e975..19bf955b67e0 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -346,7 +346,7 @@ struct vmcb_save_area { u64 last_excp_from; u64 last_excp_to; u8 reserved_0x298[72]; - u32 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */ + u64 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */ } __packed; /* Save area definition for SEV-ES and SEV-SNP guests */ @@ -513,7 +513,7 @@ struct ghcb { } __packed; -#define EXPECTED_VMCB_SAVE_AREA_SIZE 740 +#define EXPECTED_VMCB_SAVE_AREA_SIZE 744 #define EXPECTED_GHCB_SAVE_AREA_SIZE 1032 #define EXPECTED_SEV_ES_SAVE_AREA_SIZE 1648 #define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024 -- cgit 

From 42764413d19585caff1321823b376c0bd0a597af Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:39 -0700 Subject: KVM: x86: Add a framework for enabling KVM-governed x86 features 

Introduce yet another X86_FEATURE flag framework to manage and cache KVM governed features (for lack of a better name). "Governed" in this case means that KVM has some level of involvement and/or vested interest in whether or not an X86_FEATURE can be used by the guest. The intent of the framework is twofold: to simplify caching of guest CPUID flags that KVM needs to frequently query, and to add clarity to such caching, e.g. it isn't immediately obvious that SVM's bundle of flags for "optional nested SVM features" tracks whether or not a flag is exposed to L1. 

Begrudgingly define KVM_MAX_NR_GOVERNED_FEATURES for the size of the bitmap to avoid exposing governed_features.h in arch/x86/include/asm/, but add a FIXME to call out that it can and should be cleaned up once "struct kvm_vcpu_arch" is no longer exposed to the kernel at large. 

Cc: Zeng Guang Reviewed-by: Binbin Wu Reviewed-by: Kai Huang Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 19 +++++++++++++++++ arch/x86/kvm/cpuid.c | 4 ++++ arch/x86/kvm/cpuid.h | 46 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/governed_features.h | 9 ++++++++ 4 files changed, 78 insertions(+) create mode 100644 arch/x86/kvm/governed_features.h 

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index dad9331c5270..007fa8bfd634 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -831,6 +831,25 @@ struct kvm_vcpu_arch { struct kvm_cpuid_entry2 *cpuid_entries; struct kvm_hypervisor_cpuid kvm_cpuid; + /* + * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly + * when "struct kvm_vcpu_arch" is no longer defined in an + * arch/x86/include/asm header. The max is mostly arbitrary, i.e. + * can be increased as necessary. + */ +#define KVM_MAX_NR_GOVERNED_FEATURES BITS_PER_LONG + + /* + * Track whether or not the guest is allowed to use features that are + * governed by KVM, where "governed" means KVM needs to manage state + * and/or explicitly enable the feature in hardware. 
Typically, but + * not always, governed features can be used by the guest if and only + * if both KVM and userspace want to expose the feature to the guest. + */ + struct { + DECLARE_BITMAP(enabled, KVM_MAX_NR_GOVERNED_FEATURES); + } governed_features; + u64 reserved_gpa_bits; int maxphyaddr; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 5a88affb2e1a..4ba43ae008cb 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -313,6 +313,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_cpuid_entry2 *best; + BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES); + bitmap_zero(vcpu->arch.governed_features.enabled, + KVM_MAX_NR_GOVERNED_FEATURES); + best = kvm_find_cpuid_entry(vcpu, 1); if (best && apic) { if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER)) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index b1658c0de847..284fa4704553 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -232,4 +232,50 @@ static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu, return vcpu->arch.pv_cpuid.features & (1u << kvm_feature); } +enum kvm_governed_features { +#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x, +#include "governed_features.h" + KVM_NR_GOVERNED_FEATURES +}; + +static __always_inline int kvm_governed_feature_index(unsigned int x86_feature) +{ + switch (x86_feature) { +#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x; +#include "governed_features.h" + default: + return -1; + } +} + +static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature) +{ + return kvm_governed_feature_index(x86_feature) >= 0; +} + +static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu, + unsigned int x86_feature) +{ + BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature)); + + __set_bit(kvm_governed_feature_index(x86_feature), + vcpu->arch.governed_features.enabled); +} + +static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu, + unsigned int x86_feature) +{ + if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature)) + kvm_governed_feature_set(vcpu, x86_feature); +} + +static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, + unsigned int x86_feature) +{ + BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature)); + + return test_bit(kvm_governed_feature_index(x86_feature), + vcpu->arch.governed_features.enabled); +} + #endif diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h new file mode 100644 index 000000000000..40ce8e6608cd --- /dev/null +++ b/arch/x86/kvm/governed_features.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(KVM_GOVERNED_FEATURE) || defined(KVM_GOVERNED_X86_FEATURE) +BUILD_BUG() +#endif + +#define KVM_GOVERNED_X86_FEATURE(x) KVM_GOVERNED_FEATURE(X86_FEATURE_##x) + +#undef KVM_GOVERNED_X86_FEATURE +#undef KVM_GOVERNED_FEATURE -- cgit From ccf31d6e6cc53e50cc42845061174082fd229c79 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:40 -0700 Subject: KVM: x86/mmu: Use KVM-governed feature framework to track "GBPAGES enabled" Use the governed feature framework to track whether or not the guest can use 1GiB pages, and drop the one-off helper that wraps the surprisingly non-trivial logic surrounding 1GiB page usage in the guest. No functional change intended. 
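governed_features.h, which the patch below extends with its first entry, is an X-macro table: each include site defines KVM_GOVERNED_FEATURE() before including the header. A sketch of what the enum in cpuid.h expands to once GBPAGES is added:

/* With #define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x, the entry
 * KVM_GOVERNED_X86_FEATURE(GBPAGES) becomes an enumerator: */
enum kvm_governed_features {
	KVM_GOVERNED_X86_FEATURE_GBPAGES,
	KVM_NR_GOVERNED_FEATURES
};

/* ...and kvm_governed_feature_index() expands to one case per entry:
 *	case X86_FEATURE_GBPAGES: return KVM_GOVERNED_X86_FEATURE_GBPAGES; */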
Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/cpuid.c | 17 +++++++++++++++++ arch/x86/kvm/governed_features.h | 2 ++ arch/x86/kvm/mmu/mmu.c | 20 +++----------------- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 4ba43ae008cb..67e9f79fe059 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -312,11 +312,28 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_cpuid_entry2 *best; + bool allow_gbpages; BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES); bitmap_zero(vcpu->arch.governed_features.enabled, KVM_MAX_NR_GOVERNED_FEATURES); + /* + * If TDP is enabled, let the guest use GBPAGES if they're supported in + * hardware. The hardware page walker doesn't let KVM disable GBPAGES, + * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA + * walk for performance and complexity reasons. Not to mention KVM + * _can't_ solve the problem because GVA->GPA walks aren't visible to + * KVM once a TDP translation is installed. Mimic hardware behavior so + * that KVM's is at least consistent, i.e. doesn't randomly inject #PF. + * If TDP is disabled, honor *only* guest CPUID as KVM has full control + * and can install smaller shadow pages if the host lacks 1GiB support. + */ + allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) : + guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES); + if (allow_gbpages) + kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES); + best = kvm_find_cpuid_entry(vcpu, 1); if (best && apic) { if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER)) diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 40ce8e6608cd..b29c15d5e038 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -5,5 +5,7 @@ BUILD_BUG() #define KVM_GOVERNED_X86_FEATURE(x) KVM_GOVERNED_FEATURE(X86_FEATURE_##x) +KVM_GOVERNED_X86_FEATURE(GBPAGES) + #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 31d9cbe817a2..a0c2acb323eb 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4808,28 +4808,13 @@ static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check, } } -static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu) -{ - /* - * If TDP is enabled, let the guest use GBPAGES if they're supported in - * hardware. The hardware page walker doesn't let KVM disable GBPAGES, - * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA - * walk for performance and complexity reasons. Not to mention KVM - * _can't_ solve the problem because GVA->GPA walks aren't visible to - * KVM once a TDP translation is installed. Mimic hardware behavior so - * that KVM's is at least consistent, i.e. doesn't randomly inject #PF. - */ - return tdp_enabled ? 
boot_cpu_has(X86_FEATURE_GBPAGES) : - guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES); -} - static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { __reset_rsvds_bits_mask(&context->guest_rsvd_check, vcpu->arch.reserved_gpa_bits, context->cpu_role.base.level, is_efer_nx(context), - guest_can_use_gbpages(vcpu), + guest_can_use(vcpu, X86_FEATURE_GBPAGES), is_cr4_pse(context), guest_cpuid_is_amd_or_hygon(vcpu)); } @@ -4906,7 +4891,8 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(), context->root_role.level, context->root_role.efer_nx, - guest_can_use_gbpages(vcpu), is_pse, is_amd); + guest_can_use(vcpu, X86_FEATURE_GBPAGES), + is_pse, is_amd); if (!shadow_me_mask) return; -- cgit From 1143c0b85c07fc45daf1c33b26ba9320becd6748 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:41 -0700 Subject: KVM: VMX: Recompute "XSAVES enabled" only after CPUID update Recompute whether or not XSAVES is enabled for the guest only if the guest's CPUID model changes instead of redoing the computation every time KVM generates vmcs01's secondary execution controls. The boot_cpu_has() and cpu_has_vmx_xsaves() checks should never change after KVM is loaded, and if they do the kernel/KVM is hosed. Opportunistically add a comment explaining _why_ XSAVES is effectively exposed to the guest if and only if XSAVE is also exposed to the guest. Practically speaking, no functional change intended (KVM will do fewer computations, but should still see the same xsaves_enabled value whenever KVM looks at it). Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 3235998ca261..159a58c12115 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4612,19 +4612,10 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)) exec_control &= ~SECONDARY_EXEC_ENABLE_PML; - if (cpu_has_vmx_xsaves()) { - /* Exposing XSAVES only when XSAVE is exposed */ - bool xsaves_enabled = - boot_cpu_has(X86_FEATURE_XSAVE) && - guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); - - vcpu->arch.xsaves_enabled = xsaves_enabled; - + if (cpu_has_vmx_xsaves()) vmx_adjust_secondary_exec_control(vmx, &exec_control, SECONDARY_EXEC_XSAVES, - xsaves_enabled, false); - } + vcpu->arch.xsaves_enabled, false); /* * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either @@ -7747,8 +7738,15 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ - vcpu->arch.xsaves_enabled = false; + /* + * XSAVES is effectively enabled if and only if XSAVE is also exposed + * to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be + * set if and only if XSAVE is supported. 
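The argument rests on which terms of the computation can actually change after setup; annotating the expression from the diff below with each term's lifetime makes the claim concrete:

/* Annotation-only sketch, same expression as the patch below: */
bool xsaves_enabled =
	cpu_has_vmx_xsaves() &&				/* vmcs_config, fixed at setup  */
	boot_cpu_has(X86_FEATURE_XSAVE) &&		/* host CPUID, fixed at boot    */
	guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&	/* changes only on CPUID update */
	guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);	/* ditto                        */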
+ */ + vcpu->arch.xsaves_enabled = cpu_has_vmx_xsaves() && + boot_cpu_has(X86_FEATURE_XSAVE) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); vmx_setup_uret_msrs(vmx); -- cgit 

From 0497d2ac9b26d015cfafc4655ad9308c30a61810 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:42 -0700 Subject: KVM: VMX: Check KVM CPU caps, not just VMX MSR support, for XSAVE enabling 

Check KVM CPU capabilities instead of raw VMX support for XSAVES when determining whether or not XSAVES can/should be exposed to the guest. Practically speaking, it's nonsensical/impossible for a CPU to support "enable XSAVES" without XSAVES being supported natively. The real motivation for checking kvm_cpu_cap_has() is to allow using the governed feature's standard check-and-set logic. 

Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 159a58c12115..968401b2efcd 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7743,7 +7743,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) * to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be * set if and only if XSAVE is supported. */ - vcpu->arch.xsaves_enabled = cpu_has_vmx_xsaves() && + vcpu->arch.xsaves_enabled = kvm_cpu_cap_has(X86_FEATURE_XSAVES) && boot_cpu_has(X86_FEATURE_XSAVE) && guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); -- cgit 

From 662f6815786efc9a60ee374142b3fd70db4637be Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:43 -0700 Subject: KVM: VMX: Rename XSAVES control to follow KVM's preferred "ENABLE_XYZ" 

Rename the XSAVES secondary execution control to follow KVM's preferred style so that XSAVES-related logic can use common macros that depend on KVM's preferred style. No functional change intended. 
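The "common macros" build control names by token pasting, which is why the control must be spelled with an ENABLE_ prefix; a paraphrase of the vmx.c macros (the real definitions appear in a later hunk, next to the "More macro magic" comment):

/* Paraphrased sketch of the pasting chain in vmx.c: */
#define vmx_adjust_sec_exec_feature(vmx, exec_control, name, feat_name)	\
	vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name,	\
				    ENABLE_##feat_name, false)
/* vmx_adjust_sec_exec_control() then pastes SECONDARY_EXEC_##ctrl_name,
 * so XSAVES resolves to SECONDARY_EXEC_ENABLE_XSAVES after this rename;
 * the old SECONDARY_EXEC_XSAVES spelling would not resolve. */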
Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/20230815203653.519297-6-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/vmx.h | 2 +- arch/x86/kvm/vmx/capabilities.h | 2 +- arch/x86/kvm/vmx/hyperv.c | 2 +- arch/x86/kvm/vmx/nested.c | 6 +++--- arch/x86/kvm/vmx/nested.h | 2 +- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/vmx/vmx.h | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 0d02c4aafa6f..0e73616b82f3 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -71,7 +71,7 @@ #define SECONDARY_EXEC_RDSEED_EXITING VMCS_CONTROL_BIT(RDSEED_EXITING) #define SECONDARY_EXEC_ENABLE_PML VMCS_CONTROL_BIT(PAGE_MOD_LOGGING) #define SECONDARY_EXEC_PT_CONCEAL_VMX VMCS_CONTROL_BIT(PT_CONCEAL_VMX) -#define SECONDARY_EXEC_XSAVES VMCS_CONTROL_BIT(XSAVES) +#define SECONDARY_EXEC_ENABLE_XSAVES VMCS_CONTROL_BIT(XSAVES) #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC) #define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA) #define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING) diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index d0abee35d7ba..41a4533f9989 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -252,7 +252,7 @@ static inline bool cpu_has_vmx_pml(void) static inline bool cpu_has_vmx_xsaves(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_XSAVES; + SECONDARY_EXEC_ENABLE_XSAVES; } static inline bool cpu_has_vmx_waitpkg(void) diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index 79450e1ed7cf..313b8bb5b8a7 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -78,7 +78,7 @@ SECONDARY_EXEC_DESC | \ SECONDARY_EXEC_ENABLE_RDTSCP | \ SECONDARY_EXEC_ENABLE_INVPCID | \ - SECONDARY_EXEC_XSAVES | \ + SECONDARY_EXEC_ENABLE_XSAVES | \ SECONDARY_EXEC_RDSEED_EXITING | \ SECONDARY_EXEC_RDRAND_EXITING | \ SECONDARY_EXEC_TSC_SCALING | \ diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 516391cc0d64..22e08d30baef 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2307,7 +2307,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_ENABLE_RDTSCP | - SECONDARY_EXEC_XSAVES | + SECONDARY_EXEC_ENABLE_XSAVES | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT | @@ -6331,7 +6331,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, * If if it were, XSS would have to be checked against * the XSS exit bitmap in vmcs12. 
*/ - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES); case EXIT_REASON_UMWAIT: case EXIT_REASON_TPAUSE: return nested_cpu_has2(vmcs12, @@ -6874,7 +6874,7 @@ static void nested_vmx_setup_secondary_ctls(u32 ept_caps, SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_ENABLE_VMFUNC | SECONDARY_EXEC_RDSEED_EXITING | - SECONDARY_EXEC_XSAVES | + SECONDARY_EXEC_ENABLE_XSAVES | SECONDARY_EXEC_TSC_SCALING | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 96952263b029..b4b9d51438c6 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -168,7 +168,7 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12) { - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES); } static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 968401b2efcd..d0bba4a34824 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4614,7 +4614,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) if (cpu_has_vmx_xsaves()) vmx_adjust_secondary_exec_control(vmx, &exec_control, - SECONDARY_EXEC_XSAVES, + SECONDARY_EXEC_ENABLE_XSAVES, vcpu->arch.xsaves_enabled, false); /* diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 32384ba38499..cde902b44d97 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -562,7 +562,7 @@ static inline u8 vmx_get_rvi(void) SECONDARY_EXEC_APIC_REGISTER_VIRT | \ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \ SECONDARY_EXEC_SHADOW_VMCS | \ - SECONDARY_EXEC_XSAVES | \ + SECONDARY_EXEC_ENABLE_XSAVES | \ SECONDARY_EXEC_RDSEED_EXITING | \ SECONDARY_EXEC_RDRAND_EXITING | \ SECONDARY_EXEC_ENABLE_PML | \ -- cgit From fe60e8f65f79138ffdcc8a9155036cc35f6079ac Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:44 -0700 Subject: KVM: x86: Use KVM-governed feature framework to track "XSAVES enabled" Use the governed feature framework to track if XSAVES is "enabled", i.e. if XSAVES can be used by the guest. Add a comment in the SVM code to explain the very unintuitive logic of deliberately NOT checking if XSAVES is enumerated in the guest CPUID model. No functional change intended. 
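Side by side, the two entry points into the framework and why SVM needs the raw setter (both helpers come from the framework patch earlier in this series):

/* Common case: usable iff both KVM and guest CPUID expose the feature. */
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);

/* SVM case: XSAVES can't be disabled independently, so mark it usable
 * whenever the guest can actually execute it, advertised or not (see
 * the svm.c hunk below for the full rationale): */
if (boot_cpu_has(X86_FEATURE_XSAVE) &&
    boot_cpu_has(X86_FEATURE_XSAVES) &&
    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
	kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);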
Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-7-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/svm/svm.c | 17 ++++++++++++++--- arch/x86/kvm/vmx/vmx.c | 36 ++++++++++++++++++------------------ arch/x86/kvm/x86.c | 4 ++-- 5 files changed, 35 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 007fa8bfd634..771adf2438bc 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -746,7 +746,6 @@ struct kvm_vcpu_arch { u64 smi_count; bool at_instruction_boundary; bool tpr_access_reporting; - bool xsaves_enabled; bool xfd_no_write_intercept; u64 ia32_xss; u64 microcode_version; diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index b29c15d5e038..b896a64e4ac3 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -6,6 +6,7 @@ BUILD_BUG() #define KVM_GOVERNED_X86_FEATURE(x) KVM_GOVERNED_FEATURE(X86_FEATURE_##x) KVM_GOVERNED_X86_FEATURE(GBPAGES) +KVM_GOVERNED_X86_FEATURE(XSAVES) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 6d8932e05753..1c6b1175016d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4244,9 +4244,20 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct kvm_cpuid_entry2 *best; - vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - boot_cpu_has(X86_FEATURE_XSAVE) && - boot_cpu_has(X86_FEATURE_XSAVES); + /* + * SVM doesn't provide a way to disable just XSAVES in the guest, KVM + * can only disable all variants of by disallowing CR4.OSXSAVE from + * being set. As a result, if the host has XSAVE and XSAVES, and the + * guest has XSAVE enabled, the guest can execute XSAVES without + * faulting. Treat XSAVES as enabled in this case regardless of + * whether it's advertised to the guest so that KVM context switches + * XSS on VM-Enter/VM-Exit. Failure to do so would effectively give + * the guest read/write access to the host's XSS. + */ + if (boot_cpu_has(X86_FEATURE_XSAVE) && + boot_cpu_has(X86_FEATURE_XSAVES) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVE)) + kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES); /* Update nrips enabled cache */ svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d0bba4a34824..55cd17231867 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4543,16 +4543,19 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control, * based on a single guest CPUID bit, with a dedicated feature bit. This also * verifies that the control is actually supported by KVM and hardware. 
*/ -#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \ -({ \ - bool __enabled; \ - \ - if (cpu_has_vmx_##name()) { \ - __enabled = guest_cpuid_has(&(vmx)->vcpu, \ - X86_FEATURE_##feat_name); \ - vmx_adjust_secondary_exec_control(vmx, exec_control, \ - SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \ - } \ +#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \ +({ \ + struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \ + bool __enabled; \ + \ + if (cpu_has_vmx_##name()) { \ + if (kvm_is_governed_feature(X86_FEATURE_##feat_name)) \ + __enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name); \ + else \ + __enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name); \ + vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\ + __enabled, exiting); \ + } \ }) /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */ @@ -4612,10 +4615,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)) exec_control &= ~SECONDARY_EXEC_ENABLE_PML; - if (cpu_has_vmx_xsaves()) - vmx_adjust_secondary_exec_control(vmx, &exec_control, - SECONDARY_EXEC_ENABLE_XSAVES, - vcpu->arch.xsaves_enabled, false); + vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES); /* * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either @@ -4634,6 +4634,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) SECONDARY_EXEC_ENABLE_RDTSCP, rdpid_or_rdtscp_enabled, false); } + vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID); vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND); @@ -7743,10 +7744,9 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) * to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be * set if and only if XSAVE is supported. */ - vcpu->arch.xsaves_enabled = kvm_cpu_cap_has(X86_FEATURE_XSAVES) && - boot_cpu_has(X86_FEATURE_XSAVE) && - guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); + if (boot_cpu_has(X86_FEATURE_XSAVE) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVE)) + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES); vmx_setup_uret_msrs(vmx); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c91de9897e32..7849ea0b0bf7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1015,7 +1015,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); - if (vcpu->arch.xsaves_enabled && + if (guest_can_use(vcpu, X86_FEATURE_XSAVES) && vcpu->arch.ia32_xss != host_xss) wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); } @@ -1046,7 +1046,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); - if (vcpu->arch.xsaves_enabled && + if (guest_can_use(vcpu, X86_FEATURE_XSAVES) && vcpu->arch.ia32_xss != host_xss) wrmsrl(MSR_IA32_XSS, host_xss); } -- cgit From 1c18efdaa3148605071ae8b3551ee86c95c8807b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:45 -0700 Subject: KVM: nVMX: Use KVM-governed feature framework to track "nested VMX enabled" Track "VMX exposed to L1" via a governed feature flag instead of using a dedicated helper to provide the same functionality. 
The main goal is to drive convergence between VMX and SVM with respect to querying features that are controllable via module param (SVM likes to cache nested features); avoiding the guest CPUID lookups at runtime is just a bonus and unlikely to provide any meaningful performance benefits. 

Note, X86_FEATURE_VMX is set in kvm_cpu_caps if and only if "nested" is true, and the CPU obviously supports VMX if KVM+VMX is running. I.e. the check on "nested" is now implicitly done by the kvm_cpu_cap_has() check in kvm_governed_feature_check_and_set(). 

No functional change intended. 

Reviewed-by: Yuan Yao Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230815203653.519297-8-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/vmx/nested.c | 7 ++++--- arch/x86/kvm/vmx/vmx.c | 21 ++++++--------------- arch/x86/kvm/vmx/vmx.h | 1 - 4 files changed, 11 insertions(+), 19 deletions(-) 

diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index b896a64e4ac3..22446614bf49 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -7,6 +7,7 @@ BUILD_BUG() KVM_GOVERNED_X86_FEATURE(GBPAGES) KVM_GOVERNED_X86_FEATURE(XSAVES) +KVM_GOVERNED_X86_FEATURE(VMX) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 22e08d30baef..c5ec0ef51ff7 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6426,7 +6426,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, vmx = to_vmx(vcpu); vmcs12 = get_vmcs12(vcpu); - if (nested_vmx_allowed(vcpu) && + if (guest_can_use(vcpu, X86_FEATURE_VMX) && (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; @@ -6567,7 +6567,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) return -EINVAL; } else { - if (!nested_vmx_allowed(vcpu)) + if (!guest_can_use(vcpu, X86_FEATURE_VMX)) return -EINVAL; if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) @@ -6601,7 +6601,8 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && - (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) + (!guest_can_use(vcpu, X86_FEATURE_VMX) || + !vmx->nested.enlightened_vmcs_enabled)) return -EINVAL; vmx_leave_nested(vcpu); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 55cd17231867..e9386afd1521 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1908,17 +1908,6 @@ static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu) vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); } -/* - * nested_vmx_allowed() checks whether a guest should be allowed to use VMX - * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for - * all guests if the "nested" module option is off, and can also be disabled - * for a single guest by disabling its VMX cpuid bit. - */ -bool nested_vmx_allowed(struct kvm_vcpu *vcpu) -{ - return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); -} - /* * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of * guest CPUID. 
Note, KVM allows userspace to set "VMX in SMX" to maintain @@ -2046,7 +2035,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; break; case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: - if (!nested_vmx_allowed(vcpu)) + if (!guest_can_use(vcpu, X86_FEATURE_VMX)) return 1; if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, &msr_info->data)) @@ -2354,7 +2343,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!msr_info->host_initiated) return 1; /* they are read-only */ - if (!nested_vmx_allowed(vcpu)) + if (!guest_can_use(vcpu, X86_FEATURE_VMX)) return 1; return vmx_set_vmx_msr(vcpu, msr_index, data); case MSR_IA32_RTIT_CTL: @@ -7748,13 +7737,15 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) guest_cpuid_has(vcpu, X86_FEATURE_XSAVE)) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX); + vmx_setup_uret_msrs(vmx); if (cpu_has_secondary_exec_ctrls()) vmcs_set_secondary_exec_control(vmx, vmx_secondary_exec_control(vmx)); - if (nested_vmx_allowed(vcpu)) + if (guest_can_use(vcpu, X86_FEATURE_VMX)) vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; @@ -7763,7 +7754,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX); - if (nested_vmx_allowed(vcpu)) + if (guest_can_use(vcpu, X86_FEATURE_VMX)) nested_vmx_cr_fixed1_bits_update(vcpu); if (boot_cpu_has(X86_FEATURE_INTEL_PT) && diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index cde902b44d97..c2130d2c8e24 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -374,7 +374,6 @@ struct kvm_vmx { u64 *pid_table; }; -bool nested_vmx_allowed(struct kvm_vcpu *vcpu); void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, struct loaded_vmcs *buddy); int allocate_vpid(void); -- cgit From 7a6a6a3bf5d8c32a91d8fabb0e2d31f3bd23a412 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:46 -0700 Subject: KVM: nSVM: Use KVM-governed feature framework to track "NRIPS enabled" Track "NRIPS exposed to L1" via a governed feature flag instead of using a dedicated bit/flag in vcpu_svm. No functional change intended. Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-9-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/svm/nested.c | 6 +++--- arch/x86/kvm/svm/svm.c | 4 +--- arch/x86/kvm/svm/svm.h | 1 - 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 22446614bf49..722b66af412c 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -8,6 +8,7 @@ BUILD_BUG() KVM_GOVERNED_X86_FEATURE(GBPAGES) KVM_GOVERNED_X86_FEATURE(XSAVES) KVM_GOVERNED_X86_FEATURE(VMX) +KVM_GOVERNED_X86_FEATURE(NRIPS) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 3342cc4a5189..9092f3f8dccf 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -716,7 +716,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, * what a nrips=0 CPU would do (L1 is responsible for advancing RIP * prior to injecting the event). 
*/ - if (svm->nrips_enabled) + if (guest_can_use(vcpu, X86_FEATURE_NRIPS)) vmcb02->control.next_rip = svm->nested.ctl.next_rip; else if (boot_cpu_has(X86_FEATURE_NRIPS)) vmcb02->control.next_rip = vmcb12_rip; @@ -726,7 +726,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, svm->soft_int_injected = true; svm->soft_int_csbase = vmcb12_csbase; svm->soft_int_old_rip = vmcb12_rip; - if (svm->nrips_enabled) + if (guest_can_use(vcpu, X86_FEATURE_NRIPS)) svm->soft_int_next_rip = svm->nested.ctl.next_rip; else svm->soft_int_next_rip = vmcb12_rip; @@ -1026,7 +1026,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) if (vmcb12->control.exit_code != SVM_EXIT_ERR) nested_save_pending_event_to_vmcb12(svm, vmcb12); - if (svm->nrips_enabled) + if (guest_can_use(vcpu, X86_FEATURE_NRIPS)) vmcb12->control.next_rip = vmcb02->control.next_rip; vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 1c6b1175016d..de69e98a7c6a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4259,9 +4259,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) guest_cpuid_has(vcpu, X86_FEATURE_XSAVE)) kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES); - /* Update nrips enabled cache */ - svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && - guest_cpuid_has(vcpu, X86_FEATURE_NRIPS); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS); svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR); svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 5829a1801862..c06de55425d4 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -259,7 +259,6 @@ struct vcpu_svm { bool soft_int_injected; /* optional nested SVM features that are enabled for this guest */ - bool nrips_enabled : 1; bool tsc_scaling_enabled : 1; bool v_vmload_vmsave_enabled : 1; bool lbrv_enabled : 1; -- cgit 

From 4365a45571c791a2fbeb81cf27738960c5456f57 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:47 -0700 Subject: KVM: nSVM: Use KVM-governed feature framework to track "TSC scaling enabled" 

Track "TSC scaling exposed to L1" via a governed feature flag instead of using a dedicated bit/flag in vcpu_svm. 

Note, this fixes a benign bug where KVM would mark TSC scaling as exposed to L1 even if overall nested SVM support is disabled, i.e. KVM would let L1 write MSR_AMD64_TSC_RATIO even when KVM didn't advertise TSCRATEMSR support to userspace. 
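The bug disappears because kvm_governed_feature_check_and_set() consults kvm_cpu_cap_has(), which is false when nested SVM support is disabled; a condensed before/after of the svm.c change below:

/* Old: keyed off the module param and guest CPUID only, so it could be
 * true even when KVM never advertised TSCRATEMSR (nested=0): */
svm->tsc_scaling_enabled = tsc_scaling &&
			   guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);

/* New: the KVM cpu cap check inside the helper closes that hole: */
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);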
Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-10-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/svm/nested.c | 2 +- arch/x86/kvm/svm/svm.c | 10 ++++++---- arch/x86/kvm/svm/svm.h | 1 - 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 722b66af412c..32c0469cf952 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -9,6 +9,7 @@ KVM_GOVERNED_X86_FEATURE(GBPAGES) KVM_GOVERNED_X86_FEATURE(XSAVES) KVM_GOVERNED_X86_FEATURE(VMX) KVM_GOVERNED_X86_FEATURE(NRIPS) +KVM_GOVERNED_X86_FEATURE(TSCRATEMSR) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 9092f3f8dccf..da65948064dc 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -695,7 +695,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, vmcb02->control.tsc_offset = vcpu->arch.tsc_offset; - if (svm->tsc_scaling_enabled && + if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) && svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) nested_svm_update_tsc_ratio_msr(vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index de69e98a7c6a..0d5cca0bb5fb 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2795,7 +2795,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_AMD64_TSC_RATIO: - if (!msr_info->host_initiated && !svm->tsc_scaling_enabled) + if (!msr_info->host_initiated && + !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) return 1; msr_info->data = svm->tsc_ratio_msr; break; @@ -2937,7 +2938,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) switch (ecx) { case MSR_AMD64_TSC_RATIO: - if (!svm->tsc_scaling_enabled) { + if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) { if (!msr->host_initiated) return 1; @@ -2959,7 +2960,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->tsc_ratio_msr = data; - if (svm->tsc_scaling_enabled && is_guest_mode(vcpu)) + if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) && + is_guest_mode(vcpu)) nested_svm_update_tsc_ratio_msr(vcpu); break; @@ -4260,8 +4262,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR); - svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR); svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV); svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index c06de55425d4..4e7332f77702 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -259,7 +259,6 @@ struct vcpu_svm { bool soft_int_injected; /* optional nested SVM features that are enabled for this guest */ - bool tsc_scaling_enabled : 1; bool v_vmload_vmsave_enabled : 1; bool lbrv_enabled : 1; bool pause_filter_enabled : 1; -- cgit From 4d2a1560ffc29525493829ee31dc069c00c52c69 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:48 -0700 Subject: KVM: nSVM: Use KVM-governed feature framework to track "vVM{SAVE,LOAD} enabled" Track "virtual VMSAVE/VMLOAD exposed to L1" via a governed feature flag instead of 
using a dedicated bit/flag in vcpu_svm. Opportunistically add a comment explaining why KVM disallows virtual VMLOAD/VMSAVE when the vCPU model is Intel. 

No functional change intended. 

Link: https://lore.kernel.org/r/20230815203653.519297-11-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/svm/nested.c | 2 +- arch/x86/kvm/svm/svm.c | 10 +++++++--- arch/x86/kvm/svm/svm.h | 1 - 4 files changed, 9 insertions(+), 5 deletions(-) 

diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 32c0469cf952..f01a95fd0071 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -10,6 +10,7 @@ KVM_GOVERNED_X86_FEATURE(XSAVES) KVM_GOVERNED_X86_FEATURE(VMX) KVM_GOVERNED_X86_FEATURE(NRIPS) KVM_GOVERNED_X86_FEATURE(TSCRATEMSR) +KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index da65948064dc..24d47ebeb0e0 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -107,7 +107,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm) { - if (!svm->v_vmload_vmsave_enabled) + if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD)) return true; if (!nested_npt_enabled(svm)) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 0d5cca0bb5fb..78d53ea513f4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1194,8 +1194,6 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); - - svm->v_vmload_vmsave_enabled = false; } else { /* * If hardware supports Virtual VMLOAD VMSAVE then enable it @@ -4266,7 +4264,13 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV); - svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); + /* + * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that + * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing + * SVM on Intel is bonkers and extremely unlikely to work). + */ + if (!guest_cpuid_is_intel(vcpu)) + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) && guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 4e7332f77702..b475241df6dc 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -259,7 +259,6 @@ struct vcpu_svm { bool soft_int_injected; /* optional nested SVM features that are enabled for this guest */ - bool v_vmload_vmsave_enabled : 1; bool lbrv_enabled : 1; bool pause_filter_enabled : 1; bool pause_threshold_enabled : 1; -- cgit 

From e183d17ac362655e5ef061760245b4402b9c04f8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:49 -0700 Subject: KVM: nSVM: Use KVM-governed feature framework to track "LBRv enabled" 

Track "LBR virtualization exposed to L1" via a governed feature flag instead of using a dedicated bit/flag in vcpu_svm. 

Note, checking KVM's capabilities instead of the "lbrv" param means that the code isn't strictly equivalent, as lbrv_enabled could have been set if nested=false, whereas the governed feature cannot. 
But that's a glorified nop as the feature/flag is consumed only by paths that are gated by nSVM being enabled. Link: https://lore.kernel.org/r/20230815203653.519297-12-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/svm/nested.c | 23 +++++++++++++---------- arch/x86/kvm/svm/svm.c | 5 ++--- arch/x86/kvm/svm/svm.h | 1 - 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index f01a95fd0071..3a4c0e40e1e0 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -11,6 +11,7 @@ KVM_GOVERNED_X86_FEATURE(VMX) KVM_GOVERNED_X86_FEATURE(NRIPS) KVM_GOVERNED_X86_FEATURE(TSCRATEMSR) KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD) +KVM_GOVERNED_X86_FEATURE(LBRV) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 24d47ebeb0e0..f50f74b1a04e 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -552,6 +552,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 bool new_vmcb12 = false; struct vmcb *vmcb01 = svm->vmcb01.ptr; struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; + struct kvm_vcpu *vcpu = &svm->vcpu; nested_vmcb02_compute_g_pat(svm); @@ -577,18 +578,18 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 vmcb_mark_dirty(vmcb02, VMCB_DT); } - kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); + kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); - svm_set_efer(&svm->vcpu, svm->nested.save.efer); + svm_set_efer(vcpu, svm->nested.save.efer); - svm_set_cr0(&svm->vcpu, svm->nested.save.cr0); - svm_set_cr4(&svm->vcpu, svm->nested.save.cr4); + svm_set_cr0(vcpu, svm->nested.save.cr0); + svm_set_cr4(vcpu, svm->nested.save.cr4); svm->vcpu.arch.cr2 = vmcb12->save.cr2; - kvm_rax_write(&svm->vcpu, vmcb12->save.rax); - kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp); - kvm_rip_write(&svm->vcpu, vmcb12->save.rip); + kvm_rax_write(vcpu, vmcb12->save.rax); + kvm_rsp_write(vcpu, vmcb12->save.rsp); + kvm_rip_write(vcpu, vmcb12->save.rip); /* In case we don't even reach vcpu_run, the fields are not updated */ vmcb02->save.rax = vmcb12->save.rax; @@ -602,7 +603,8 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 vmcb_mark_dirty(vmcb02, VMCB_DR); } - if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { + if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) && + (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { /* * Reserved bits of DEBUGCTL are ignored. Be consistent with * svm_set_msr's definition of reserved bits. 
@@ -734,7 +736,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, vmcb02->control.virt_ext = vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK; - if (svm->lbrv_enabled) + if (guest_can_use(vcpu, X86_FEATURE_LBRV)) vmcb02->control.virt_ext |= (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK); @@ -1065,7 +1067,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm) if (!nested_exit_on_intr(svm)) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); - if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { + if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) && + (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { svm_copy_lbrs(vmcb12, vmcb02); svm_update_lbrv(vcpu); } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) { diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 78d53ea513f4..3b8e412238d3 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1024,7 +1024,7 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu) bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); - if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled)) + if (unlikely(is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV))) if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)) enable_lbrv = true; @@ -4261,8 +4261,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR); - - svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV); /* * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index b475241df6dc..0e21823e8a19 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -259,7 +259,6 @@ struct vcpu_svm { bool soft_int_injected; /* optional nested SVM features that are enabled for this guest */ - bool lbrv_enabled : 1; bool pause_filter_enabled : 1; bool pause_threshold_enabled : 1; bool vgif_enabled : 1; -- cgit From 59d67fc1f0dbb5ee8841d62140efe0fdc4fbabf5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:50 -0700 Subject: KVM: nSVM: Use KVM-governed feature framework to track "Pause Filter enabled" Track "Pause Filtering is exposed to L1" via governed feature flags instead of using dedicated bits/flags in vcpu_svm. No functional change intended. 
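As a rough sketch of the conversion pattern this series applies (helper and feature names are taken from the surrounding diffs; the functions themselves are illustrative, not the literal upstream code), the per-feature bitfields in vcpu_svm are replaced by a bit that is recorded once at CPUID update time and queried at consumption time:

static void example_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Sets the governed-feature bit iff KVM itself supports the
	 * feature and the feature is enabled in the guest's CPUID.
	 */
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
}

static bool example_consumer(struct kvm_vcpu *vcpu)
{
	/* Replaces the old dedicated bitfield, e.g. pause_filter_enabled. */
	return guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER);
}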
Reviewed-by: Yuan Yao Link: https://lore.kernel.org/r/20230815203653.519297-13-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 2 ++ arch/x86/kvm/svm/nested.c | 10 ++++++++-- arch/x86/kvm/svm/svm.c | 7 ++----- arch/x86/kvm/svm/svm.h | 2 -- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 3a4c0e40e1e0..9afd34f30599 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -12,6 +12,8 @@ KVM_GOVERNED_X86_FEATURE(NRIPS) KVM_GOVERNED_X86_FEATURE(TSCRATEMSR) KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD) KVM_GOVERNED_X86_FEATURE(LBRV) +KVM_GOVERNED_X86_FEATURE(PAUSEFILTER) +KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index f50f74b1a04e..ac03b2bc5b2c 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -743,8 +743,14 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, if (!nested_vmcb_needs_vls_intercept(svm)) vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; - pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0; - pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0; + if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER)) + pause_count12 = svm->nested.ctl.pause_filter_count; + else + pause_count12 = 0; + if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD)) + pause_thresh12 = svm->nested.ctl.pause_filter_thresh; + else + pause_thresh12 = 0; if (kvm_pause_in_guest(svm->vcpu.kvm)) { /* use guest values since host doesn't intercept PAUSE */ vmcb02->control.pause_filter_count = pause_count12; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 3b8e412238d3..0dc70ad88d29 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4271,11 +4271,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) if (!guest_cpuid_is_intel(vcpu)) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); - svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) && - guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER); - - svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) && - guest_cpuid_has(vcpu, X86_FEATURE_PFTHRESHOLD); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD); svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 0e21823e8a19..fb438439b61e 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -259,8 +259,6 @@ struct vcpu_svm { bool soft_int_injected; /* optional nested SVM features that are enabled for this guest */ - bool pause_filter_enabled : 1; - bool pause_threshold_enabled : 1; bool vgif_enabled : 1; bool vnmi_enabled : 1; -- cgit From b89456aee78d22b20c6c83c4d75af7985ae5be8d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:51 -0700 Subject: KVM: nSVM: Use KVM-governed feature framework to track "vGIF enabled" Track "virtual GIF exposed to L1" via a governed feature flag instead of using a dedicated bit/flag in vcpu_svm. Note, checking KVM's capabilities instead of the "vgif" param means that the code isn't strictly equivalent, as vgif_enabled could have been set if nested=false, whereas the governed feature cannot.
But that's a glorified nop as the feature/flag is consumed only by paths that are gated by nSVM being enabled. Link: https://lore.kernel.org/r/20230815203653.519297-14-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/svm/nested.c | 3 ++- arch/x86/kvm/svm/svm.c | 3 +-- arch/x86/kvm/svm/svm.h | 5 +++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 9afd34f30599..368696c2e96b 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -14,6 +14,7 @@ KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD) KVM_GOVERNED_X86_FEATURE(LBRV) KVM_GOVERNED_X86_FEATURE(PAUSEFILTER) KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD) +KVM_GOVERNED_X86_FEATURE(VGIF) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index ac03b2bc5b2c..dd496c9e5f91 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -660,7 +660,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes. */ - if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK)) + if (guest_can_use(vcpu, X86_FEATURE_VGIF) && + (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK)) int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK); else int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 0dc70ad88d29..d8b7e0c71313 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4273,8 +4273,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD); - - svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF); svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_VNMI); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index fb438439b61e..6eb5877cc6c3 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -22,6 +22,7 @@ #include #include +#include "cpuid.h" #include "kvm_cache_regs.h" #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) @@ -259,7 +260,6 @@ struct vcpu_svm { bool soft_int_injected; /* optional nested SVM features that are enabled for this guest */ - bool vgif_enabled : 1; bool vnmi_enabled : 1; u32 ldr_reg; u32 dfr_reg; struct page *avic_backing_page; @@ -485,7 +485,8 @@ static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit) static inline bool nested_vgif_enabled(struct vcpu_svm *svm) { - return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK); + return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) && + (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK); } static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm) -- cgit From ee785c870d6f80380be6549772137a41c527d0ba Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:52 -0700 Subject: KVM: nSVM: Use KVM-governed feature framework to track "vNMI enabled" Track "virtual NMI exposed to L1" via a governed feature flag instead of using a dedicated bit/flag in vcpu_svm. Note, checking KVM's capabilities instead of the "vnmi" param means that the code isn't strictly equivalent, as vnmi_enabled could have been set if nested=false, whereas the governed feature cannot.
But that's a glorified nop as the feature/flag is consumed only by paths that are gated by nSVM being enabled. Link: https://lore.kernel.org/r/20230815203653.519297-15-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/svm/svm.c | 3 +-- arch/x86/kvm/svm/svm.h | 5 +---- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 368696c2e96b..423a73395c10 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -15,6 +15,7 @@ KVM_GOVERNED_X86_FEATURE(LBRV) KVM_GOVERNED_X86_FEATURE(PAUSEFILTER) KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD) KVM_GOVERNED_X86_FEATURE(VGIF) +KVM_GOVERNED_X86_FEATURE(VNMI) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d8b7e0c71313..226b3a780d0f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4274,8 +4274,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF); - - svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_VNMI); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI); svm_recalc_instruction_intercepts(vcpu, svm); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 6eb5877cc6c3..06400cfe2244 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -259,9 +259,6 @@ struct vcpu_svm { unsigned long soft_int_next_rip; bool soft_int_injected; - /* optional nested SVM features that are enabled for this guest */ - bool vnmi_enabled : 1; - u32 ldr_reg; u32 dfr_reg; struct page *avic_backing_page; @@ -537,7 +534,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm) static inline bool nested_vnmi_enabled(struct vcpu_svm *svm) { - return svm->vnmi_enabled && + return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) && (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK); } -- cgit From 9717efbe5ba3f52d4b3cc637c7f7f6149ea264bb Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 13:36:53 -0700 Subject: KVM: x86: Disallow guest CPUID lookups when IRQs are disabled Now that KVM has a framework for caching guest CPUID feature flags, add a "rule" that IRQs must be enabled when doing guest CPUID lookups, and enforce the rule via a lockdep assertion. CPUID lookups are slow, and within KVM, IRQs are only ever disabled in hot paths, e.g. the core run loop, fast page fault handling, etc. I.e. querying guest CPUID with IRQs disabled, especially in the run loop, should be avoided. Link: https://lore.kernel.org/r/20230815203653.519297-16-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/cpuid.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 67e9f79fe059..e961e9a05847 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include "linux/lockdep.h" #include #include #include @@ -84,6 +85,18 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( struct kvm_cpuid_entry2 *e; int i; + /* + * KVM has a semi-arbitrary rule that querying the guest's CPUID model + * with IRQs disabled is disallowed. The CPUID model can legitimately + * have over one hundred entries, i.e. 
the lookup is slow, and IRQs are + * typically disabled in KVM only when KVM is in a performance critical + * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break + * if this rule is violated, this assertion is purely to flag potential + * performance issues. If this fires, consider moving the lookup out + * of the hotpath, e.g. by caching information during CPUID updates. + */ + lockdep_assert_irqs_enabled(); + for (i = 0; i < nent; i++) { e = &entries[i]; -- cgit From 1f8403953f05af591ab72cf749b9b9b837ea9595 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Mon, 14 Aug 2023 22:03:39 +0800 Subject: KVM: Remove unused kvm_device_{get,put}() declarations Commit 07f0a7bdec5c ("kvm: destroy emulated devices on VM exit") removed the functions but not these declarations. Signed-off-by: Yue Haibing Link: https://lore.kernel.org/r/20230814140339.47732-1-yuehaibing@huawei.com [sean: split to separate patch] Signed-off-by: Sean Christopherson --- include/linux/kvm_host.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9125d0ab642d..a973a7cb45e0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2152,8 +2152,6 @@ struct kvm_device_ops { int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma); }; -void kvm_device_get(struct kvm_device *dev); -void kvm_device_put(struct kvm_device *dev); struct kvm_device *kvm_device_from_filp(struct file *filp); int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type); void kvm_unregister_device_ops(u32 type); -- cgit From 458933d33af2cb3663bd8c0080c1efd1f9483db4 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Mon, 14 Aug 2023 22:03:39 +0800 Subject: KVM: Remove unused kvm_make_cpus_request_mask() declaration Commit 7ee30bc132c6 ("KVM: x86: deliver KVM IOAPIC scan request to target vCPUs") declared but never implemented kvm_make_cpus_request_mask() as kvm_make_vcpus_request_mask() already existed. Note, KVM's APIs are painfully inconsistent, as the inclusive variant uses "vcpus", whereas the exclusive/all variants use "cpus", which is likely what led to the spurious declaration. The "vcpus" terminology is more correct, especially since the helpers will kick _physical_ CPUs by calling kvm_kick_many_cpus(). But that's a cleanup for the future. 
Signed-off-by: Yue Haibing Link: https://lore.kernel.org/r/20230814140339.47732-1-yuehaibing@huawei.com [sean: split to separate patch, call out inconsistent naming] Signed-off-by: Sean Christopherson --- include/linux/kvm_host.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a973a7cb45e0..199698a5ffa6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -190,8 +190,6 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except); -bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, - unsigned long *vcpu_bitmap); #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 -- cgit From cf6d80c0eb95900e19e1ad54c686a1877dda4276 Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Thu, 10 Aug 2023 17:09:40 +0800 Subject: KVM: selftests: Add x86 properties for Intel PMU in processor.h Add x86 properties for Intel PMU so that tests don't have to manually retrieve the correct CPUID leaf+register, and so that the resulting code is self-documenting. Suggested-by: Sean Christopherson Signed-off-by: Jinrong Liang Link: https://lore.kernel.org/r/20230810090945.16053-2-cloudliang@tencent.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/x86_64/processor.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index aa434c8f19c5..4fd042112526 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -239,7 +239,12 @@ struct kvm_x86_cpu_property { #define X86_PROPERTY_MAX_BASIC_LEAF KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31) #define X86_PROPERTY_PMU_VERSION KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7) #define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15) +#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23) #define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31) +#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7) +#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31) +#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4) +#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12) #define X86_PROPERTY_SUPPORTED_XCR0_LO KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31) #define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31) -- cgit From c853be2265ccd497ff17b878962a2dbcb4873abf Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Thu, 10 Aug 2023 17:09:41 +0800 Subject: KVM: selftests: Drop the return of remove_event() None of the callers consume the return of remove_event(), and it incorrectly implies that the incoming filter isn't modified. Drop the return.
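With the return dropped, callers invoke it purely for its side effect, e.g. (mirroring the updated callers later in this series):

	remove_event(&f, INST_RETIRED);	/* mutates 'f' in place, returns nothing */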
Suggested-by: Sean Christopherson Signed-off-by: Jinrong Liang Link: https://lore.kernel.org/r/20230810090945.16053-3-cloudliang@tencent.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 40507ed9fe8a..5ac05e64bec9 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -265,8 +265,7 @@ static struct kvm_pmu_event_filter *event_filter(uint32_t action) * Remove the first occurrence of 'event' (if any) from the filter's * event list. */ -static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f, - uint64_t event) +static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event) { bool found = false; int i; @@ -279,7 +278,6 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f, } if (found) f->nevents--; - return f; } #define ASSERT_PMC_COUNTING_INSTRUCTIONS() \ -- cgit From de527b1daf69a41a265f48ad7bb9207ca7b7d29b Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Thu, 10 Aug 2023 17:09:42 +0800 Subject: KVM: selftests: Introduce "struct __kvm_pmu_event_filter" to manipulate filter Add custom "__kvm_pmu_event_filter" structure to improve pmu event filter settings. Simplifies event filter setup by organizing event filter parameters in a cleaner, more organized way. Alternatively, selftests could use a struct overlay ala vcpu_set_msr() to avoid dynamically allocating the array: struct { struct kvm_msrs header; struct kvm_msr_entry entry; } buffer = {}; memset(&buffer, 0, sizeof(buffer)); buffer.header.nmsrs = 1; buffer.entry.index = msr_index; buffer.entry.data = msr_value; but the extra layer added by the nested structs is counterproductive to writing efficient, clean code. Suggested-by: Sean Christopherson Signed-off-by: Jinrong Liang Link: https://lore.kernel.org/r/20230810090945.16053-4-cloudliang@tencent.com [sean: massage changelog to explain alternative] Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 182 ++++++++++----------- 1 file changed, 90 insertions(+), 92 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 5ac05e64bec9..fcb6cf9ae4a9 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -28,6 +28,10 @@ #define NUM_BRANCHES 42 +/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */ +#define MAX_FILTER_EVENTS 300 +#define MAX_TEST_EVENTS 10 + /* * This is how the event selector and unit mask are stored in an AMD * core performance event-select register. Intel's format is similar, @@ -69,21 +73,33 @@ #define INST_RETIRED EVENT(0xc0, 0) +struct __kvm_pmu_event_filter { + __u32 action; + __u32 nevents; + __u32 fixed_counter_bitmap; + __u32 flags; + __u32 pad[4]; + __u64 events[MAX_FILTER_EVENTS]; +}; + /* * This event list comprises Intel's eight architectural events plus * AMD's "retired branch instructions" for Zen[123] (and possibly * other AMD CPUs). 
*/ -static const uint64_t event_list[] = { - EVENT(0x3c, 0), - INST_RETIRED, - EVENT(0x3c, 1), - EVENT(0x2e, 0x4f), - EVENT(0x2e, 0x41), - EVENT(0xc4, 0), - EVENT(0xc5, 0), - EVENT(0xa4, 1), - AMD_ZEN_BR_RETIRED, +static const struct __kvm_pmu_event_filter base_event_filter = { + .nevents = ARRAY_SIZE(base_event_filter.events), + .events = { + EVENT(0x3c, 0), + INST_RETIRED, + EVENT(0x3c, 1), + EVENT(0x2e, 0x4f), + EVENT(0x2e, 0x41), + EVENT(0xc4, 0), + EVENT(0xc5, 0), + EVENT(0xa4, 1), + AMD_ZEN_BR_RETIRED, + }, }; struct { @@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu) return !r; } -static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents) -{ - struct kvm_pmu_event_filter *f; - int size = sizeof(*f) + nevents * sizeof(f->events[0]); - - f = malloc(size); - TEST_ASSERT(f, "Out of memory"); - memset(f, 0, size); - f->nevents = nevents; - return f; -} - - -static struct kvm_pmu_event_filter * -create_pmu_event_filter(const uint64_t event_list[], int nevents, - uint32_t action, uint32_t flags) -{ - struct kvm_pmu_event_filter *f; - int i; - - f = alloc_pmu_event_filter(nevents); - f->action = action; - f->flags = flags; - for (i = 0; i < nevents; i++) - f->events[i] = event_list[i]; - - return f; -} - -static struct kvm_pmu_event_filter *event_filter(uint32_t action) -{ - return create_pmu_event_filter(event_list, - ARRAY_SIZE(event_list), - action, 0); -} - /* * Remove the first occurrence of 'event' (if any) from the filter's * event list. */ -static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event) +static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event) { bool found = false; int i; @@ -313,66 +293,73 @@ static void test_without_filter(struct kvm_vcpu *vcpu) } static void test_with_filter(struct kvm_vcpu *vcpu, - struct kvm_pmu_event_filter *f) + struct __kvm_pmu_event_filter *__f) { + struct kvm_pmu_event_filter *f = (void *)__f; + vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); run_vcpu_and_sync_pmc_results(vcpu); } static void test_amd_deny_list(struct kvm_vcpu *vcpu) { - uint64_t event = EVENT(0x1C2, 0); - struct kvm_pmu_event_filter *f; + struct __kvm_pmu_event_filter f = { + .action = KVM_PMU_EVENT_DENY, + .nevents = 1, + .events = { + EVENT(0x1C2, 0), + }, + }; - f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0); - test_with_filter(vcpu, f); - free(f); + test_with_filter(vcpu, &f); ASSERT_PMC_COUNTING_INSTRUCTIONS(); } static void test_member_deny_list(struct kvm_vcpu *vcpu) { - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY); + struct __kvm_pmu_event_filter f = base_event_filter; - test_with_filter(vcpu, f); - free(f); + f.action = KVM_PMU_EVENT_DENY; + test_with_filter(vcpu, &f); ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(); } static void test_member_allow_list(struct kvm_vcpu *vcpu) { - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW); + struct __kvm_pmu_event_filter f = base_event_filter; - test_with_filter(vcpu, f); - free(f); + f.action = KVM_PMU_EVENT_ALLOW; + test_with_filter(vcpu, &f); ASSERT_PMC_COUNTING_INSTRUCTIONS(); } static void test_not_member_deny_list(struct kvm_vcpu *vcpu) { - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY); + struct __kvm_pmu_event_filter f = base_event_filter; - remove_event(f, INST_RETIRED); - remove_event(f, INTEL_BR_RETIRED); - remove_event(f, AMD_ZEN_BR_RETIRED); - test_with_filter(vcpu, f); - free(f); + f.action = KVM_PMU_EVENT_DENY; + + remove_event(&f, INST_RETIRED); + remove_event(&f, 
INTEL_BR_RETIRED); + remove_event(&f, AMD_ZEN_BR_RETIRED); + test_with_filter(vcpu, &f); ASSERT_PMC_COUNTING_INSTRUCTIONS(); } static void test_not_member_allow_list(struct kvm_vcpu *vcpu) { - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW); + struct __kvm_pmu_event_filter f = base_event_filter; + + f.action = KVM_PMU_EVENT_ALLOW; - remove_event(f, INST_RETIRED); - remove_event(f, INTEL_BR_RETIRED); - remove_event(f, AMD_ZEN_BR_RETIRED); - test_with_filter(vcpu, f); - free(f); + remove_event(&f, INST_RETIRED); + remove_event(&f, INTEL_BR_RETIRED); + remove_event(&f, AMD_ZEN_BR_RETIRED); + test_with_filter(vcpu, &f); ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(); } @@ -567,19 +554,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu, const uint64_t masked_events[], const int nmasked_events) { - struct kvm_pmu_event_filter *f; + struct __kvm_pmu_event_filter f = { + .nevents = nmasked_events, + .action = KVM_PMU_EVENT_ALLOW, + .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS, + }; - f = create_pmu_event_filter(masked_events, nmasked_events, - KVM_PMU_EVENT_ALLOW, - KVM_PMU_EVENT_FLAG_MASKED_EVENTS); - test_with_filter(vcpu, f); - free(f); + memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events); + test_with_filter(vcpu, &f); } -/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */ -#define MAX_FILTER_EVENTS 300 -#define MAX_TEST_EVENTS 10 - #define ALLOW_LOADS BIT(0) #define ALLOW_STORES BIT(1) #define ALLOW_LOADS_STORES BIT(2) @@ -751,17 +735,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu) run_masked_events_tests(vcpu, events, nevents); } -static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events, - int nevents, uint32_t flags) +static int set_pmu_event_filter(struct kvm_vcpu *vcpu, + struct __kvm_pmu_event_filter *__f) { - struct kvm_pmu_event_filter *f; - int r; + struct kvm_pmu_event_filter *f = (void *)__f; - f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags); - r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); - free(f); + return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); +} + +static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event, + uint32_t flags, uint32_t action) +{ + struct __kvm_pmu_event_filter f = { + .nevents = 1, + .flags = flags, + .action = action, + .events = { + event, + }, + }; - return r; + return set_pmu_event_filter(vcpu, &f); } static void test_filter_ioctl(struct kvm_vcpu *vcpu) @@ -773,14 +767,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu) * Unfortunately having invalid bits set in event data is expected to * pass when flags == 0 (bits other than eventsel+umask). 
*/ - r = run_filter_test(vcpu, &e, 1, 0); + r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW); TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing"); - r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + r = set_pmu_single_event_filter(vcpu, e, + KVM_PMU_EVENT_FLAG_MASKED_EVENTS, + KVM_PMU_EVENT_ALLOW); TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail"); e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf); - r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + r = set_pmu_single_event_filter(vcpu, e, + KVM_PMU_EVENT_FLAG_MASKED_EVENTS, + KVM_PMU_EVENT_ALLOW); TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing"); } -- cgit From 86ab6af8b96a6a6c8f86f1b0ea1dd386f43930db Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Thu, 10 Aug 2023 17:09:43 +0800 Subject: KVM: selftests: Add test cases for unsupported PMU event filter input values Add test cases to verify the handling of unsupported input values for the PMU event filter. The tests cover unsupported "action" values, unsupported "flags" values, and unsupported "nevents" values. All these cases should return an error, as they are currently not supported by the filter. Furthermore, the tests also cover the case where setting non-existent fixed counters in the fixed bitmap does not fail. Signed-off-by: Jinrong Liang Reviewed-by: Isaku Yamahata Link: https://lore.kernel.org/r/20230810090945.16053-5-cloudliang@tencent.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index fcb6cf9ae4a9..ca4ff5ad613a 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -32,6 +32,10 @@ #define MAX_FILTER_EVENTS 300 #define MAX_TEST_EVENTS 10 +#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1) +#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1) +#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1) + /* * This is how the event selector and unit mask are stored in an AMD * core performance event-select register. 
Intel's format is similar, @@ -760,6 +764,8 @@ static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event, static void test_filter_ioctl(struct kvm_vcpu *vcpu) { + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + struct __kvm_pmu_event_filter f; uint64_t e = ~0ul; int r; @@ -780,6 +786,26 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu) KVM_PMU_EVENT_FLAG_MASKED_EVENTS, KVM_PMU_EVENT_ALLOW); TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing"); + + f = base_event_filter; + f.action = PMU_EVENT_FILTER_INVALID_ACTION; + r = set_pmu_event_filter(vcpu, &f); + TEST_ASSERT(r, "Set invalid action is expected to fail"); + + f = base_event_filter; + f.flags = PMU_EVENT_FILTER_INVALID_FLAGS; + r = set_pmu_event_filter(vcpu, &f); + TEST_ASSERT(r, "Set invalid flags is expected to fail"); + + f = base_event_filter; + f.nevents = PMU_EVENT_FILTER_INVALID_NEVENTS; + r = set_pmu_event_filter(vcpu, &f); + TEST_ASSERT(r, "Exceeding the max number of filter events should fail"); + + f = base_event_filter; + f.fixed_counter_bitmap = ~GENMASK_ULL(nr_fixed_counters, 0); + r = set_pmu_event_filter(vcpu, &f); + TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed"); } int main(int argc, char *argv[]) -- cgit From d4e36166820a089f6300dc59dbb641e8c150bec1 Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Thu, 10 Aug 2023 17:09:44 +0800 Subject: KVM: selftests: Test if event filter meets expectations on fixed counters Add tests to cover that pmu event_filter works as expected when it's applied to fixed performance counters, even if no fixed counters exist (e.g. Intel guest pmu version=1 or AMD guest). Signed-off-by: Jinrong Liang Reviewed-by: Isaku Yamahata Link: https://lore.kernel.org/r/20230810090945.16053-6-cloudliang@tencent.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 80 ++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index ca4ff5ad613a..c9355ca4cd50 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -27,6 +27,7 @@ #define ARCH_PERFMON_BRANCHES_RETIRED 5 #define NUM_BRANCHES 42 +#define INTEL_PMC_IDX_FIXED 32 /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */ #define MAX_FILTER_EVENTS 300 @@ -808,6 +809,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu) TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed"); } +static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx) +{ + for (;;) { + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0); + + /* Only OS_EN bit is enabled for fixed counter[idx]. */ + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx)); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, + BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx)); + __asm__ __volatile__("loop ."
: "+c"((int){NUM_BRANCHES})); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); + + GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx)); + } +} + +static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, + uint32_t action, uint32_t bitmap) +{ + struct __kvm_pmu_event_filter f = { + .action = action, + .fixed_counter_bitmap = bitmap, + }; + set_pmu_event_filter(vcpu, &f); + + return run_vcpu_to_sync(vcpu); +} + +static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, + uint8_t nr_fixed_counters) +{ + unsigned int i; + uint32_t bitmap; + uint64_t count; + + TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8, + "Invalid nr_fixed_counters"); + + /* + * Check the fixed performance counter can count normally when KVM + * userspace doesn't set any pmu filter. + */ + count = run_vcpu_to_sync(vcpu); + TEST_ASSERT(count, "Unexpected count value: %ld\n", count); + + for (i = 0; i < BIT(nr_fixed_counters); i++) { + bitmap = BIT(i); + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW, + bitmap); + TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx))); + + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY, + bitmap); + TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx))); + } +} + +static void test_fixed_counter_bitmap(void) +{ + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + struct kvm_vm *vm; + struct kvm_vcpu *vcpu; + uint8_t idx; + + /* + * Check that pmu_event_filter works as expected when it's applied to + * fixed performance counters. + */ + for (idx = 0; idx < nr_fixed_counters; idx++) { + vm = vm_create_with_one_vcpu(&vcpu, + intel_run_fixed_counter_guest_code); + vcpu_args_set(vcpu, 1, idx); + __test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters); + kvm_vm_free(vm); + } +} + int main(int argc, char *argv[]) { void (*guest_code)(void); @@ -851,6 +930,7 @@ int main(int argc, char *argv[]) kvm_vm_free(vm); test_pmu_config_disable(guest_code); + test_fixed_counter_bitmap(); return 0; } -- cgit From 740d087e7fff2a68fb1c7f01efb239e45f217485 Mon Sep 17 00:00:00 2001 From: Jinrong Liang Date: Thu, 10 Aug 2023 17:09:45 +0800 Subject: KVM: selftests: Test gp event filters don't affect fixed event filters Add a test to ensure that setting both generic and fixed performance event filters does not affect the consistency of the fixed event filter behavior in KVM. 
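A minimal sketch of the interaction under test, using the struct and helpers from the diffs in this series (the values are illustrative):

	struct __kvm_pmu_event_filter f = base_event_filter;	/* populates events[] */

	f.action = KVM_PMU_EVENT_ALLOW;
	f.fixed_counter_bitmap = BIT(idx);
	set_pmu_event_filter(vcpu, &f);

	/*
	 * Fixed counter 'idx' keeps counting: the fixed_counter_bitmap, not
	 * events[], decides the fate of the fixed counters.
	 */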
Signed-off-by: Jinrong Liang Link: https://lore.kernel.org/r/20230810090945.16053-7-cloudliang@tencent.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 27 ++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index c9355ca4cd50..283cc55597a4 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -838,6 +838,19 @@ static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, return run_vcpu_to_sync(vcpu); } +static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, + uint32_t action, + uint32_t bitmap) +{ + struct __kvm_pmu_event_filter f = base_event_filter; + + f.action = action; + f.fixed_counter_bitmap = bitmap; + set_pmu_event_filter(vcpu, &f); + + return run_vcpu_to_sync(vcpu); +} + static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, uint8_t nr_fixed_counters) { @@ -864,6 +877,20 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY, bitmap); TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx))); + + /* + * Check that fixed_counter_bitmap has higher priority than + * events[] when both are set. + */ + count = test_set_gp_and_fixed_event_filter(vcpu, + KVM_PMU_EVENT_ALLOW, + bitmap); + TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx))); + + count = test_set_gp_and_fixed_event_filter(vcpu, + KVM_PMU_EVENT_DENY, + bitmap); + TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx))); } } -- cgit From ec3eb9ed6081bea8ebf603ff545dba127071b928 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 18 Aug 2023 21:39:44 -0700 Subject: KVM: arm64: PMU: Disallow vPMU on non-uniform PMUVer Disallow userspace from configuring vPMU for guests on systems where the PMUVer is not uniform across all PEs. KVM has not been advertising PMUv3 to the guests with vPMU on such systems anyway, and such systems would be extremely uncommon and unlikely to even use KVM. Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230819043947.4100985-2-reijiw@google.com --- arch/arm64/kvm/pmu-emul.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 560650972478..689bbd88fd69 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -14,6 +14,7 @@ #include #include #include +#include #define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0) @@ -672,8 +673,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) { struct arm_pmu_entry *entry; - if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI || - pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) + /* + * Check the sanitised PMU version for the system, as KVM does not + * support implementations where PMUv3 exists on a subset of CPUs. + */ + if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit())) return; mutex_lock(&arm_pmus_lock); -- cgit From 335ca49ff31f145c0f08540614062197a334e064 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 18 Aug 2023 21:39:45 -0700 Subject: KVM: arm64: PMU: Avoid inappropriate use of host's PMUVer Avoid using the PMUVer of the host's PMU hardware to determine the PMU event mask, except in one case, as the value of host's PMUVer may differ from the value of ID_AA64DFR0_EL1.PMUVer for the guest. 
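As a hedged illustration of the two values in play (both accessors appear in the diff below):

	/* The guest's PMUVer, from the VM's sanitised ID register state. */
	u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
	u8 guest_pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);

	/* The host PMU hardware's PMUVer limit, kept only for the event filter. */
	u8 host_pmuver = kvm_arm_pmu_get_pmuver_limit();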
The exception case is when using the PMUVer to determine the valid range of events for KVM_ARM_VCPU_PMU_V3_FILTER, as it has been allowing userspace to specify events that are valid for the PMU hardware, regardless of the value of the guest's ID_AA64DFR0_EL1.PMUVer. KVM will use a valid range of events based on the value of the guest's ID_AA64DFR0_EL1.PMUVer, in order to effectively filter events that the guest attempts to program though. Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230819043947.4100985-3-reijiw@google.com --- arch/arm64/kvm/pmu-emul.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 689bbd88fd69..eaeb8fea7971 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -36,12 +36,8 @@ static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx) return &vcpu->arch.pmu.pmc[cnt_idx]; } -static u32 kvm_pmu_event_mask(struct kvm *kvm) +static u32 __kvm_pmu_event_mask(unsigned int pmuver) { - unsigned int pmuver; - - pmuver = kvm->arch.arm_pmu->pmuver; - switch (pmuver) { case ID_AA64DFR0_EL1_PMUVer_IMP: return GENMASK(9, 0); @@ -56,6 +52,14 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) } } +static u32 kvm_pmu_event_mask(struct kvm *kvm) +{ + u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1); + u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0); + + return __kvm_pmu_event_mask(pmuver); +} + /** * kvm_pmc_is_64bit - determine if counter is 64bit * @pmc: counter context @@ -954,11 +958,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return 0; } case KVM_ARM_VCPU_PMU_V3_FILTER: { + u8 pmuver = kvm_arm_pmu_get_pmuver_limit(); struct kvm_pmu_event_filter __user *uaddr; struct kvm_pmu_event_filter filter; int nr_events; - nr_events = kvm_pmu_event_mask(kvm) + 1; + /* + * Allow userspace to specify an event filter for the entire + * event range supported by PMUVer of the hardware, rather + * than the guest's PMUVer for KVM backward compatibility. + */ + nr_events = __kvm_pmu_event_mask(pmuver) + 1; uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr; -- cgit From 8c694f557fd80ca9815ddb1cf5de10d8bf168110 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 18 Aug 2023 21:39:46 -0700 Subject: KVM: arm64: PMU: Don't advertise the STALL_SLOT event Currently, KVM hides the STALL_SLOT event for guests if the host PMU version is PMUv3p4 or newer, as PMMIR_EL1 is handled as RAZ for the guests. But, this should be based on the guests' PMU version (instead of the host PMU version), as an older PMU that doesn't support PMMIR_EL1 could support the STALL_SLOT event, according to the Arm ARM. Exposing the STALL_SLOT event without PMMIR_EL1 won't be very useful anyway though. Stop advertising the STALL_SLOT event for guests unconditionally, rather than fixing or keeping the inaccurate checking to advertise the event for the case, where it is not very useful. 
Suggested-by: Oliver Upton Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230819043947.4100985-4-reijiw@google.com --- arch/arm64/kvm/pmu-emul.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index eaeb8fea7971..b9633deff32a 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -761,8 +761,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4) - val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); + val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } -- cgit From 64b81000b60b70f10a5834023fe100902d9f7a57 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 18 Aug 2023 21:39:47 -0700 Subject: KVM: arm64: PMU: Don't advertise STALL_SLOT_{FRONTEND,BACKEND} Don't advertise STALL_SLOT_{FRONT,BACK}END events to the guest, similar to STALL_SLOT event, as when any of these three events are implemented, all three of them should be implemented, according to the Arm ARM. Suggested-by: Oliver Upton Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230819043947.4100985-5-reijiw@google.com --- arch/arm64/kvm/pmu-emul.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index b9633deff32a..6b066e04dc5d 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -758,10 +758,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) } else { val = read_sysreg(pmceid1_el0); /* - * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled + * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled * as RAZ */ - val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); + val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) | + BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) | + BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32)); base = 32; } -- cgit From b1f778a223a2a8ad6262e5233cfc3483bcf6e213 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 20 Aug 2023 10:01:08 +0100 Subject: KVM: arm64: pmu: Resync EL0 state on counter rotation Huang Shijie reports that, when profiling a guest from the host with a number of events that exceeds the number of available counters, the reported counts are wildly inaccurate. Without the counter oversubscription, the reported counts are correct. Their investigation indicates that upon counter rotation (which takes place on the back of a timer interrupt), we fail to re-apply the guest EL0 enabling, leading to the counting of host events instead of guest events. In order to solve this, add yet another hook between the host PMU driver and KVM, re-applying the guest EL0 configuration if the right conditions apply (the host is VHE, we are in interrupt context, and we interrupted a running vcpu). This triggers a new vcpu request which will apply the correct configuration on guest reentry. With this, we have the correct counts, even when the counters are oversubscribed. 
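Sketched as a call flow, with every function and request name taken from the diff below (the layout itself is editorial):

	/*
	 * Host perf driver, on counter rotation (timer interrupt):
	 *   armv8pmu_start()
	 *     -> kvm_vcpu_pmu_resync_el0()	// only if VHE, in_interrupt(),
	 *					// and a vCPU is running
	 *          -> kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu)
	 *
	 * On guest reentry:
	 *   check_vcpu_requests()
	 *     -> kvm_vcpu_pmu_restore_guest(vcpu)	// re-applies guest EL0 state
	 */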
Reported-by: Huang Shijie Suggested-by: Oliver Upton Tested-by: Huang Shijie Signed-off-by: Marc Zyngier Cc: Leo Yan Cc: Mark Rutland Cc: Will Deacon Link: https://lore.kernel.org/r/20230809013953.7692-1-shijie@os.amperecomputing.com Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20230820090108.177817-1-maz@kernel.org --- arch/arm/include/asm/arm_pmuv3.h | 2 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 3 +++ arch/arm64/kvm/pmu.c | 18 ++++++++++++++++++ drivers/perf/arm_pmuv3.c | 2 ++ include/kvm/arm_pmu.h | 2 ++ 6 files changed, 28 insertions(+) diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h index f3cd04ff022d..72529f5e2bed 100644 --- a/arch/arm/include/asm/arm_pmuv3.h +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -227,6 +227,8 @@ static inline bool kvm_set_pmuserenr(u64 val) return false; } +static inline void kvm_vcpu_pmu_resync_el0(void) {} + /* PMU Version in DFR Register */ #define ARMV8_PMU_DFR_VER_NI 0 #define ARMV8_PMU_DFR_VER_V3P4 0x5 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d3dd05bbfe23..553040e0e375 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -49,6 +49,7 @@ #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) #define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) +#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7) #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 72dc53a75d1c..978b0411082f 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -803,6 +803,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) kvm_pmu_handle_pmcr(vcpu, __vcpu_sys_reg(vcpu, PMCR_EL0)); + if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu)) + kvm_vcpu_pmu_restore_guest(vcpu); + if (kvm_check_request(KVM_REQ_SUSPEND, vcpu)) return kvm_vcpu_suspend(vcpu); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 121f1a14c829..0eea225fd09a 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -236,3 +236,21 @@ bool kvm_set_pmuserenr(u64 val) ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val; return true; } + +/* + * If we interrupted the guest to update the host PMU context, make + * sure we re-apply the guest EL0 state.
+ */ +void kvm_vcpu_pmu_resync_el0(void) +{ + struct kvm_vcpu *vcpu; + + if (!has_vhe() || !in_interrupt()) + return; + + vcpu = kvm_get_running_vcpu(); + if (!vcpu) + return; + + kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu); +} diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 08b3a1bf0ef6..6a3d8176f54a 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -772,6 +772,8 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu) /* Enable all counters */ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); + + kvm_vcpu_pmu_resync_el0(); } static void armv8pmu_stop(struct arm_pmu *cpu_pmu) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 847da6fc2713..3a8a70a60794 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -74,6 +74,7 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); struct kvm_pmu_events *kvm_get_pmu_events(void); void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu); void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); +void kvm_vcpu_pmu_resync_el0(void); #define kvm_vcpu_has_pmu(vcpu) \ (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) @@ -171,6 +172,7 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void) { return 0; } +static inline void kvm_vcpu_pmu_resync_el0(void) {} #endif -- cgit From 9b80b9676be901eb980c48543a19d7026d2f84b8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 23 Aug 2023 10:14:48 +0100 Subject: KVM: arm64: pmu: Guard PMU emulation definitions with CONFIG_KVM Most of the internal definitions for PMU emulation are guarded with CONFIG_HW_PERF_EVENTS. However, this isn't enough, and leads to these definitions leaking if CONFIG_KVM isn't enabled. This leads to some compilation breakage in this exact configuration. Fix it by falling back to the dummy stubs if either perf or KVM isn't selected. Reported-by: Alexander Stein Tested-by: Alexander Stein Signed-off-by: Marc Zyngier --- include/kvm/arm_pmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 3a8a70a60794..31029f4f7be8 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -12,7 +12,7 @@ #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) -#ifdef CONFIG_HW_PERF_EVENTS +#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ -- cgit From c948a0a2f5dc729411a6cfbe0348c0aac5d8a07a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 21 Aug 2023 18:44:15 +0100 Subject: KVM: arm64: nv: Add trap description for SPSR_EL2 and ELR_EL2 Having carved a hole for SP_EL1, we are now missing the entries for SPSR_EL2 and ELR_EL2. Add them back. Reported-by: Miguel Luis Signed-off-by: Marc Zyngier --- arch/arm64/kvm/emulate-nested.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 1cc606c16416..9ced1bf0c2b7 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -651,6 +651,8 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_RANGE_TRAP(sys_reg(3, 4, 0, 0, 0), sys_reg(3, 4, 3, 15, 7), CGT_HCR_NV), /* Skip the SP_EL1 encoding... 
*/ + SR_TRAP(SYS_SPSR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_ELR_EL2, CGT_HCR_NV), SR_RANGE_TRAP(sys_reg(3, 4, 4, 1, 1), sys_reg(3, 4, 10, 15, 7), CGT_HCR_NV), SR_RANGE_TRAP(sys_reg(3, 4, 12, 0, 0), -- cgit From f1187ef24eb8f36e8ad8106d22615ceddeea6097 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 24 Aug 2023 19:23:56 -0700 Subject: KVM: SVM: Get source vCPUs from source VM for SEV-ES intrahost migration Fix a goof where KVM tries to grab source vCPUs from the destination VM when doing intrahost migration. Grabbing the wrong vCPU not only hoses the guest, it also crashes the host due to the VMSA pointer being left NULL. BUG: unable to handle page fault for address: ffffe38687000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] SMP NOPTI CPU: 39 PID: 17143 Comm: sev_migrate_tes Tainted: GO 6.5.0-smp--fff2e47e6c3b-next #151 Hardware name: Google, Inc. Arcadia_IT_80/Arcadia_IT_80, BIOS 34.28.0 07/10/2023 RIP: 0010:__free_pages+0x15/0xd0 RSP: 0018:ffff923fcf6e3c78 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffffe38687000000 RCX: 0000000000000100 RDX: 0000000000000100 RSI: 0000000000000000 RDI: ffffe38687000000 RBP: ffff923fcf6e3c88 R08: ffff923fcafb0000 R09: 0000000000000000 R10: 0000000000000000 R11: ffffffff83619b90 R12: ffff923fa9540000 R13: 0000000000080007 R14: ffff923f6d35d000 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff929d0d7c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffe38687000000 CR3: 0000005224c34005 CR4: 0000000000770ee0 PKRU: 55555554 Call Trace: sev_free_vcpu+0xcb/0x110 [kvm_amd] svm_vcpu_free+0x75/0xf0 [kvm_amd] kvm_arch_vcpu_destroy+0x36/0x140 [kvm] kvm_destroy_vcpus+0x67/0x100 [kvm] kvm_arch_destroy_vm+0x161/0x1d0 [kvm] kvm_put_kvm+0x276/0x560 [kvm] kvm_vm_release+0x25/0x30 [kvm] __fput+0x106/0x280 ____fput+0x12/0x20 task_work_run+0x86/0xb0 do_exit+0x2e3/0x9c0 do_group_exit+0xb1/0xc0 __x64_sys_exit_group+0x1b/0x20 do_syscall_64+0x41/0x90 entry_SYSCALL_64_after_hwframe+0x63/0xcd CR2: ffffe38687000000 Fixes: 6defa24d3b12 ("KVM: SEV: Init target VMCBs in sev_migrate_from") Cc: stable@vger.kernel.org Cc: Peter Gonda Reviewed-by: Peter Gonda Reviewed-by: Pankaj Gupta Link: https://lore.kernel.org/r/20230825022357.2852133-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2cd15783dfb9..acc700bcb299 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1739,7 +1739,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm) * Note, the source is not required to have the same number of * vCPUs as the destination when migrating a vanilla SEV VM. */ - src_vcpu = kvm_get_vcpu(dst_kvm, i); + src_vcpu = kvm_get_vcpu(src_kvm, i); src_svm = to_svm(src_vcpu); /* -- cgit From 1952e74da96fb3e48b72a2d0ece78c688a5848c1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 24 Aug 2023 19:23:57 -0700 Subject: KVM: SVM: Skip VMSA init in sev_es_init_vmcb() if pointer is NULL Skip initializing the VMSA physical address in the VMCB if the VMSA is NULL, which occurs during intrahost migration as KVM initializes the VMCB before copying over state from the source to the destination (including the VMSA and its physical address). 
In normal builds, __pa() is just math, so the bug isn't fatal, but with CONFIG_DEBUG_VIRTUAL=y, the validity of the virtual address is verified and passing in NULL will make the kernel unhappy. Fixes: 6defa24d3b12 ("KVM: SEV: Init target VMCBs in sev_migrate_from") Cc: stable@vger.kernel.org Cc: Peter Gonda Reviewed-by: Peter Gonda Reviewed-by: Pankaj Gupta Link: https://lore.kernel.org/r/20230825022357.2852133-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index acc700bcb299..5585a3556179 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2975,9 +2975,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) /* * An SEV-ES guest requires a VMSA area that is a separate from the * VMCB page. Do not include the encryption mask on the VMSA physical - * address since hardware will access it using the guest key. + * address since hardware will access it using the guest key. Note, + * the VMSA will be NULL if this vCPU is the destination for intrahost + * migration, and will be copied later. */ - svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); + if (svm->sev_es.vmsa) + svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); /* Can't intercept CR register access, HV can't modify CR registers */ svm_clr_intercept(svm, INTERCEPT_CR0_READ); -- cgit From cb49631ad111570f1bad37702c11c2ae07fa2e3c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 24 Aug 2023 18:36:18 -0700 Subject: KVM: SVM: Don't inject #UD if KVM attempts to skip SEV guest insn Don't inject a #UD if KVM attempts to "emulate" to skip an instruction for an SEV guest, and instead resume the guest and hope that it can make forward progress. When commit 04c40f344def ("KVM: SVM: Inject #UD on attempted emulation for SEV guest w/o insn buffer") added the completely arbitrary #UD behavior, there were no known scenarios where a well-behaved guest would induce a VM-Exit that triggered emulation, i.e. it was thought that injecting #UD would be helpful. However, now that KVM (correctly) attempts to re-inject INT3/INTO, e.g. if a #NPF is encountered when attempting to deliver the INT3/INTO, an SEV guest can trigger emulation without a buffer, through no fault of its own. Resuming the guest and retrying the INT3/INTO is architecturally wrong, e.g. the vCPU will incorrectly re-hit code #DBs, but for SEV guests there is literally no other option that has a chance of making forward progress. Drop the #UD injection for all "skip" emulation, not just those related to INT3/INTO, even though that means that the guest will likely end up in an infinite loop instead of getting a #UD (the vCPU may also crash, e.g. if KVM emulated everything about an instruction except for advancing RIP). There's no evidence that suggests that an unexpected #UD is actually better than hanging the vCPU, e.g. a soft-hung vCPU can still respond to IRQs and NMIs to generate a backtrace. 
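Condensed, the new handling of a missing instruction buffer for an SEV guest looks like this (the exact hunk is in the diff below):

	if (unlikely(!insn)) {
		if (!(emul_type & EMULTYPE_SKIP))
			kvm_queue_exception(vcpu, UD_VECTOR);	/* non-skip: keep #UD */
		return false;	/* skip: resume the guest, hope for forward progress */
	}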
Reported-by: Wu Zongyo Closes: https://lore.kernel.org/all/8eb933fd-2cf3-d7a9-32fe-2a1d82eac42a@mail.ustc.edu.cn Fixes: 6ef88d6e36c2 ("KVM: SVM: Re-inject INT3/INTO instead of retrying the instruction") Cc: stable@vger.kernel.org Cc: Tom Lendacky Link: https://lore.kernel.org/r/20230825013621.2845700-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d76c8a0764f1..d7a474571ff1 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -365,6 +365,8 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; } +static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, + void *insn, int insn_len); static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu, bool commit_side_effects) @@ -385,6 +387,14 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu, } if (!svm->next_rip) { + /* + * FIXME: Drop this when kvm_emulate_instruction() does the + * right thing and treats "can't emulate" as outright failure + * for EMULTYPE_SKIP. + */ + if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0)) + return 0; + if (unlikely(!commit_side_effects)) old_rflags = svm->vmcb->save.rflags; @@ -4677,16 +4687,25 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, * and cannot be decrypted by KVM, i.e. KVM would read cyphertext and * decode garbage. * - * Inject #UD if KVM reached this point without an instruction buffer. - * In practice, this path should never be hit by a well-behaved guest, - * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path - * is still theoretically reachable, e.g. via unaccelerated fault-like - * AVIC access, and needs to be handled by KVM to avoid putting the - * guest into an infinite loop. Injecting #UD is somewhat arbitrary, - * but its the least awful option given lack of insight into the guest. + * If KVM is NOT trying to simply skip an instruction, inject #UD if + * KVM reached this point without an instruction buffer. In practice, + * this path should never be hit by a well-behaved guest, e.g. KVM + * doesn't intercept #UD or #GP for SEV guests, but this path is still + * theoretically reachable, e.g. via unaccelerated fault-like AVIC + * access, and needs to be handled by KVM to avoid putting the guest + * into an infinite loop. Injecting #UD is somewhat arbitrary, but + * its the least awful option given lack of insight into the guest. + * + * If KVM is trying to skip an instruction, simply resume the guest. + * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM + * will attempt to re-inject the INT3/INTO and skip the instruction. + * In that scenario, retrying the INT3/INTO and hoping the guest will + * make forward progress is the only option that has a chance of + * success (and in practice it will work the vast majority of the time). 
*/ if (unlikely(!insn)) { - kvm_queue_exception(vcpu, UD_VECTOR); + if (!(emul_type & EMULTYPE_SKIP)) + kvm_queue_exception(vcpu, UD_VECTOR); return false; } -- cgit From 80d0f521d59e08eeaa0bc5d624da139448fb99b8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 24 Aug 2023 18:36:19 -0700 Subject: KVM: SVM: Require nrips support for SEV guests (and beyond) Disallow SEV (and beyond) if nrips is disabled via module param, as KVM can't read guest memory to partially emulate and skip an instruction. All CPUs that support SEV support NRIPS, i.e. this is purely stopping the user from shooting themselves in the foot. Cc: Tom Lendacky Link: https://lore.kernel.org/r/20230825013621.2845700-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 2 +- arch/x86/kvm/svm/svm.c | 11 ++++------- arch/x86/kvm/svm/svm.h | 1 + 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 5585a3556179..85d1abdf7d7d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2185,7 +2185,7 @@ void __init sev_hardware_setup(void) bool sev_es_supported = false; bool sev_supported = false; - if (!sev_enabled || !npt_enabled) + if (!sev_enabled || !npt_enabled || !nrips) goto out; /* diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d7a474571ff1..5cf2380c89dd 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -203,7 +203,7 @@ static int nested = true; module_param(nested, int, S_IRUGO); /* enable/disable Next RIP Save */ -static int nrips = true; +int nrips = true; module_param(nrips, int, 0444); /* enable/disable Virtual VMLOAD VMSAVE */ @@ -5156,9 +5156,11 @@ static __init int svm_hardware_setup(void) svm_adjust_mmio_mask(); + nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS); + /* * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which - * may be modified by svm_adjust_mmio_mask()). + * may be modified by svm_adjust_mmio_mask()), as well as nrips. */ sev_hardware_setup(); @@ -5170,11 +5172,6 @@ static __init int svm_hardware_setup(void) goto err; } - if (nrips) { - if (!boot_cpu_has(X86_FEATURE_NRIPS)) - nrips = false; - } - enable_apicv = avic = avic && avic_hardware_setup(); if (!enable_apicv) { diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 800ca1776b59..1498956a589f 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -33,6 +33,7 @@ #define MSRPM_OFFSETS 32 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; extern bool npt_enabled; +extern int nrips; extern int vgif; extern bool intercept_smi; extern bool x2avic_enabled; -- cgit From 5002b112a5ad23579b7c3a36c9748740bcdfc88e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 17 Aug 2023 16:34:29 -0700 Subject: KVM: selftests: Reload "good" vCPU state if vCPU hits shutdown Reload known good vCPU state if the vCPU triple faults in any of the race_sync_regs() subtests, e.g. if KVM successfully injects an exception (the vCPU isn't configured to handle exceptions). On Intel, the VMCS is preserved even after shutdown, but AMD's APM states that the VMCB is undefined after a shutdown and so KVM synthesizes an INIT to sanitize vCPU/VMCB state, e.g. to guard against running with a garbage VMCB. The synthetic INIT results in the vCPU never exiting to userspace, as it gets put into Real Mode at the reset vector, which is full of zeros (as is GPA 0 and beyond), and so executes ADD for a very, very long time. 
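In selftest terms, the pattern amounts to the sketch below. It leans on the harness APIs used by the patch — vcpu_save_state(), __vcpu_run(), vcpu_load_state() — and is not a standalone program:

#include "kvm_util.h"
#include "processor.h"

static void run_with_reload(struct kvm_vcpu *vcpu, int iterations)
{
        /* Capture known-good state before anything can corrupt it. */
        struct kvm_x86_state *state = vcpu_save_state(vcpu);

        for (int i = 0; i < iterations; i++) {
                /* SVM synthesizes INIT on shutdown; undo it by reload. */
                if (!__vcpu_run(vcpu) &&
                    vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN)
                        vcpu_load_state(vcpu, state);
        }

        kvm_x86_state_cleanup(state);
}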
Fixes: 60c4063b4752 ("KVM: selftests: Extend x86's sync_regs_test to check for event vector races") Cc: Michal Luczaj Link: https://lore.kernel.org/r/20230817233430.1416463-2-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/sync_regs_test.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 93fac74ca0a7..21e99dae2ff2 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -152,6 +152,7 @@ static noinline void *race_sregs_cr4(void *arg) static void race_sync_regs(void *racer) { const time_t TIMEOUT = 2; /* seconds, roughly */ + struct kvm_x86_state *state; struct kvm_translation tr; struct kvm_vcpu *vcpu; struct kvm_run *run; @@ -166,6 +167,9 @@ static void race_sync_regs(void *racer) vcpu_run(vcpu); run->kvm_valid_regs = 0; + /* Save state *before* spawning the thread that mucks with vCPU state. */ + state = vcpu_save_state(vcpu); + /* * Selftests run 64-bit guests by default, both EFER.LME and CR4.PAE * should already be set in guest state. @@ -179,7 +183,14 @@ static void race_sync_regs(void *racer) TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0); for (t = time(NULL) + TIMEOUT; time(NULL) < t;) { - __vcpu_run(vcpu); + /* + * Reload known good state if the vCPU triple faults, e.g. due + * to the unhandled #GPs being injected. VMX preserves state + * on shutdown, but SVM synthesizes an INIT as the VMCB state + * is architecturally undefined on triple fault. + */ + if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN) + vcpu_load_state(vcpu, state); if (racer == race_sregs_cr4) { tr = (struct kvm_translation) { .linear_address = 0 }; @@ -190,6 +201,7 @@ static void race_sync_regs(void *racer) TEST_ASSERT_EQ(pthread_cancel(thread), 0); TEST_ASSERT_EQ(pthread_join(thread, NULL), 0); + kvm_x86_state_cleanup(state); kvm_vm_free(vm); } -- cgit From 02dc2543e3795867260826983bf3f20b282c8fc3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 17 Aug 2023 16:34:30 -0700 Subject: KVM: selftests: Explicit set #UD when *potentially* injecting exception Explicitly set the exception vector to #UD when potentially injecting an exception in sync_regs_test's subtests that try to detect TOCTOU bugs in KVM's handling of exceptions injected by userspace. A side effect of the original KVM bug was that KVM would clear the vector, but relying on KVM to clear the vector (i.e. make it #DE) makes it less likely that the test would ever find *new* KVM bugs, e.g. because only the first iteration would run with a legal vector to start. Explicitly inject #UD for race_events_inj_pen() as well, e.g. so that it doesn't inherit the illegal 255 vector from race_events_exc(), which currently runs first. 
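Condensed, one iteration of the fixed racing thread looks like this (taken in spirit from the patch below; run points at the shared kvm_run structure):

#include "kvm_util.h"
#include "processor.h"

static void race_exc_once(struct kvm_run *run)
{
        struct kvm_vcpu_events *events = &run->s.regs.events;

        WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
        WRITE_ONCE(events->flags, 0);
        WRITE_ONCE(events->exception.nr, UD_VECTOR);    /* start legal */
        WRITE_ONCE(events->exception.pending, 1);
        WRITE_ONCE(events->exception.nr, 255);          /* race to illegal */
}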
Link: https://lore.kernel.org/r/20230817233430.1416463-3-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/sync_regs_test.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 21e99dae2ff2..00965ba33f73 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -91,6 +91,8 @@ static void *race_events_inj_pen(void *arg) struct kvm_run *run = (struct kvm_run *)arg; struct kvm_vcpu_events *events = &run->s.regs.events; + WRITE_ONCE(events->exception.nr, UD_VECTOR); + for (;;) { WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS); WRITE_ONCE(events->flags, 0); @@ -115,6 +117,7 @@ static void *race_events_exc(void *arg) for (;;) { WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS); WRITE_ONCE(events->flags, 0); + WRITE_ONCE(events->exception.nr, UD_VECTOR); WRITE_ONCE(events->exception.pending, 1); WRITE_ONCE(events->exception.nr, 255); -- cgit From c92b922a8c526e1bb11945a703cba9f85976de7e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 17 Aug 2023 16:41:14 -0700 Subject: KVM: x86: Update MAINTAINTERS to include selftests Give KVM x86 the same treatment as all other KVM architectures, and officially take ownership of x86 specific KVM selftests (changes have been routed through kvm and/or kvm-x86 for quite some time). Cc: kvm@vger.kernel.org Link: https://lore.kernel.org/r/20230817234114.1420092-1-seanjc@google.com Signed-off-by: Sean Christopherson --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index aee340630eca..2adf76b044c8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11498,6 +11498,8 @@ F: arch/x86/include/uapi/asm/svm.h F: arch/x86/include/uapi/asm/vmx.h F: arch/x86/kvm/ F: arch/x86/kvm/*/ +F: tools/testing/selftests/kvm/*/x86_64/ +F: tools/testing/selftests/kvm/x86_64/ KERNFS M: Greg Kroah-Hartman -- cgit From 9ca0c1a1265c03d54f1c71005ea0375f9bccabee Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 15 Aug 2023 10:42:15 -0700 Subject: KVM: VMX: Delete ancient pr_warn() about KVM_SET_TSS_ADDR not being set Delete KVM's printk about KVM_SET_TSS_ADDR not being called. When the printk was added by commit 776e58ea3d37 ("KVM: unbreak userspace that does not sets tss address"), KVM also stuffed a "hopefully safe" value, i.e. the message wasn't purely informational. For reasons unknown, ostensibly to try and help people running outdated qemu-kvm versions, the message got left behind when KVM's stuffing was removed by commit 4918c6ca6838 ("KVM: VMX: Require KVM_SET_TSS_ADDR being called prior to running a VCPU"). Today, the message is completely nonsensical, as it has been over a decade since KVM supported userspace running a Real Mode guest, on a CPU without unrestricted guest support, without doing KVM_SET_TSS_ADDR before KVM_RUN. I.e. KVM's ABI has required KVM_SET_TSS_ADDR for 10+ years. To make matters worse, the message is prone to false positives as it triggers when simply *creating* a vCPU due to RESET putting vCPUs into Real Mode, even when the user has no intention of ever *running* the vCPU in a Real Mode. E.g. KVM selftests stuff 64-bit mode and never touch Real Mode, but trigger the message even though they run just fine without doing KVM_SET_TSS_ADDR. Creating "dummy" vCPUs, e.g. to probe features, can also trigger the message. 
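For reference, the userspace call the warning nagged about is a one-liner against the VM file descriptor. A minimal sketch (error handling trimmed; 0xfffbd000 is the address QEMU traditionally picks, but any unused GPA range below 4GiB works):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);

        /* Reserve a 3-page region for the Real Mode TSS hack. */
        if (ioctl(vm, KVM_SET_TSS_ADDR, 0xfffbd000) < 0)
                perror("KVM_SET_TSS_ADDR");
        return 0;
}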
In both scenarios, the message confuses users and falsely implies that they've done something wrong. Reported-by: Thorsten Glaser Closes: https://lkml.kernel.org/r/f1afa6c0-cde2-ab8b-ea71-bfa62a45b956%40tarent.de Link: https://lore.kernel.org/r/20230815174215.433222-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f848f43fa3e2..b0093b8a72d2 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3047,13 +3047,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmx->rmode.vm86_active = 1; - /* - * Very old userspace does not call KVM_SET_TSS_ADDR before entering - * vcpu. Warn the user that an update is overdue. - */ - if (!kvm_vmx->tss_addr) - pr_warn_once("KVM_SET_TSS_ADDR needs to be called before running vCPU\n"); - vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); -- cgit From f156a7d13fc35d0078cd644b8cf0a6f97cbbe2e2 Mon Sep 17 00:00:00 2001 From: Vincent Donnefort Date: Fri, 11 Aug 2023 12:20:37 +0100 Subject: KVM: arm64: Remove size-order align in the nVHE hyp private VA range commit f922c13e778d ("KVM: arm64: Introduce pkvm_alloc_private_va_range()") and commit 92abe0f81e13 ("KVM: arm64: Introduce hyp_alloc_private_va_range()") added an alignment for the start address of any allocation into the nVHE hypervisor private VA range. This alignment (order of the size of the allocation) intends to enable efficient stack verification (if the PAGE_SHIFT bit is zero, the stack pointer is on the guard page and a stack overflow occurred). But this is only necessary for stack allocation and can waste a lot of VA space. So instead make stack-specific functions, handling the guard page requirements, while other users (e.g. fixmap) will only get page alignment. Reviewed-by: Kalesh Singh Signed-off-by: Vincent Donnefort Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230811112037.1147863-1-vdonnefort@google.com --- arch/arm64/include/asm/kvm_mmu.h | 1 + arch/arm64/kvm/arm.c | 26 +---------- arch/arm64/kvm/hyp/include/nvhe/mm.h | 1 + arch/arm64/kvm/hyp/nvhe/mm.c | 83 +++++++++++++++++++++++++++-------- arch/arm64/kvm/hyp/nvhe/setup.c | 27 +----------- arch/arm64/kvm/mmu.c | 85 ++++++++++++++++++++++++++++-------- 6 files changed, 138 insertions(+), 85 deletions(-) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 0e1e1ab17b4d..96a80e8f6226 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -168,6 +168,7 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **haddr); int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, void **haddr); +int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr); void __init free_hyp_pgds(void); void stage2_unmap_vm(struct kvm *kvm); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3c015bdd35ee..93ada0aae507 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2283,30 +2283,8 @@ static int __init init_hyp_mode(void) for_each_possible_cpu(cpu) { struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); - unsigned long hyp_addr; - /* - * Allocate a contiguous HYP private VA range for the stack - * and guard page. The allocation is also aligned based on - * the order of its size. 
- */ - err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr); - if (err) { - kvm_err("Cannot allocate hyp stack guard page\n"); - goto out_err; - } - - /* - * Since the stack grows downwards, map the stack to the page - * at the higher address and leave the lower guard page - * unbacked. - * - * Any valid stack address now has the PAGE_SHIFT bit as 1 - * and addresses corresponding to the guard page have the - * PAGE_SHIFT bit as 0 - this is used for overflow detection. - */ - err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE, - __pa(stack_page), PAGE_HYP); + err = create_hyp_stack(__pa(stack_page), ¶ms->stack_hyp_va); if (err) { kvm_err("Cannot map hyp stack\n"); goto out_err; @@ -2319,8 +2297,6 @@ static int __init init_hyp_mode(void) * has been mapped in the flexible private VA space. */ params->stack_pa = __pa(stack_page); - - params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE); } for_each_possible_cpu(cpu) { diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h index d5ec972b5c1e..230e4f2527de 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot int __pkvm_create_private_mapping(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot, unsigned long *haddr); +int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr); int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr); #endif /* __KVM_HYP_MM_H */ diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c index 318298eb3d6b..65a7a186d7b2 100644 --- a/arch/arm64/kvm/hyp/nvhe/mm.c +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -44,6 +44,27 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size, return err; } +static int __pkvm_alloc_private_va_range(unsigned long start, size_t size) +{ + unsigned long cur; + + hyp_assert_lock_held(&pkvm_pgd_lock); + + if (!start || start < __io_map_base) + return -EINVAL; + + /* The allocated size is always a multiple of PAGE_SIZE */ + cur = start + PAGE_ALIGN(size); + + /* Are we overflowing on the vmemmap ? */ + if (cur > __hyp_vmemmap) + return -ENOMEM; + + __io_map_base = cur; + + return 0; +} + /** * pkvm_alloc_private_va_range - Allocates a private VA range. * @size: The size of the VA range to reserve. @@ -56,27 +77,16 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size, */ int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr) { - unsigned long base, addr; - int ret = 0; + unsigned long addr; + int ret; hyp_spin_lock(&pkvm_pgd_lock); - - /* Align the allocation based on the order of its size */ - addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size)); - - /* The allocated size is always a multiple of PAGE_SIZE */ - base = addr + PAGE_ALIGN(size); - - /* Are we overflowing on the vmemmap ? 
*/ - if (!addr || base > __hyp_vmemmap) - ret = -ENOMEM; - else { - __io_map_base = base; - *haddr = addr; - } - + addr = __io_map_base; + ret = __pkvm_alloc_private_va_range(addr, size); hyp_spin_unlock(&pkvm_pgd_lock); + *haddr = addr; + return ret; } @@ -340,6 +350,45 @@ int hyp_create_idmap(u32 hyp_va_bits) return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC); } +int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr) +{ + unsigned long addr, prev_base; + size_t size; + int ret; + + hyp_spin_lock(&pkvm_pgd_lock); + + prev_base = __io_map_base; + /* + * Efficient stack verification using the PAGE_SHIFT bit implies + * an alignment of our allocation on the order of the size. + */ + size = PAGE_SIZE * 2; + addr = ALIGN(__io_map_base, size); + + ret = __pkvm_alloc_private_va_range(addr, size); + if (!ret) { + /* + * Since the stack grows downwards, map the stack to the page + * at the higher address and leave the lower guard page + * unbacked. + * + * Any valid stack address now has the PAGE_SHIFT bit as 1 + * and addresses corresponding to the guard page have the + * PAGE_SHIFT bit as 0 - this is used for overflow detection. + */ + ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE, + PAGE_SIZE, phys, PAGE_HYP); + if (ret) + __io_map_base = prev_base; + } + hyp_spin_unlock(&pkvm_pgd_lock); + + *haddr = addr + size; + + return ret; +} + static void *admit_host_page(void *arg) { struct kvm_hyp_memcache *host_mc = arg; diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index bb98630dfeaf..0d5e0a89ddce 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -113,7 +113,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, for (i = 0; i < hyp_nr_cpus; i++) { struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i); - unsigned long hyp_addr; start = (void *)kern_hyp_va(per_cpu_base[i]); end = start + PAGE_ALIGN(hyp_percpu_size); @@ -121,33 +120,9 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, if (ret) return ret; - /* - * Allocate a contiguous HYP private VA range for the stack - * and guard page. The allocation is also aligned based on - * the order of its size. - */ - ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr); + ret = pkvm_create_stack(params->stack_pa, ¶ms->stack_hyp_va); if (ret) return ret; - - /* - * Since the stack grows downwards, map the stack to the page - * at the higher address and leave the lower guard page - * unbacked. - * - * Any valid stack address now has the PAGE_SHIFT bit as 1 - * and addresses corresponding to the guard page have the - * PAGE_SHIFT bit as 0 - this is used for overflow detection. 
- */ - hyp_spin_lock(&pkvm_pgd_lock); - ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE, - PAGE_SIZE, params->stack_pa, PAGE_HYP); - hyp_spin_unlock(&pkvm_pgd_lock); - if (ret) - return ret; - - /* Update stack_hyp_va to end of the stack's private VA range */ - params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE); } /* diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 137b775d238b..499555e04732 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -592,6 +592,25 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot) return 0; } +static int __hyp_alloc_private_va_range(unsigned long base) +{ + lockdep_assert_held(&kvm_hyp_pgd_mutex); + + if (!PAGE_ALIGNED(base)) + return -EINVAL; + + /* + * Verify that BIT(VA_BITS - 1) hasn't been flipped by + * allocating the new area, as it would indicate we've + * overflowed the idmap/IO address range. + */ + if ((base ^ io_map_base) & BIT(VA_BITS - 1)) + return -ENOMEM; + + io_map_base = base; + + return 0; +} /** * hyp_alloc_private_va_range - Allocates a private VA range. @@ -612,26 +631,16 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr) /* * This assumes that we have enough space below the idmap - * page to allocate our VAs. If not, the check below will - * kick. A potential alternative would be to detect that - * overflow and switch to an allocation above the idmap. + * page to allocate our VAs. If not, the check in + * __hyp_alloc_private_va_range() will kick. A potential + * alternative would be to detect that overflow and switch + * to an allocation above the idmap. * * The allocated size is always a multiple of PAGE_SIZE. */ - base = io_map_base - PAGE_ALIGN(size); - - /* Align the allocation based on the order of its size */ - base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size)); - - /* - * Verify that BIT(VA_BITS - 1) hasn't been flipped by - * allocating the new area, as it would indicate we've - * overflowed the idmap/IO address range. - */ - if ((base ^ io_map_base) & BIT(VA_BITS - 1)) - ret = -ENOMEM; - else - *haddr = io_map_base = base; + size = PAGE_ALIGN(size); + base = io_map_base - size; + ret = __hyp_alloc_private_va_range(base); mutex_unlock(&kvm_hyp_pgd_mutex); @@ -668,6 +677,48 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, return ret; } +int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr) +{ + unsigned long base; + size_t size; + int ret; + + mutex_lock(&kvm_hyp_pgd_mutex); + /* + * Efficient stack verification using the PAGE_SHIFT bit implies + * an alignment of our allocation on the order of the size. + */ + size = PAGE_SIZE * 2; + base = ALIGN_DOWN(io_map_base - size, size); + + ret = __hyp_alloc_private_va_range(base); + + mutex_unlock(&kvm_hyp_pgd_mutex); + + if (ret) { + kvm_err("Cannot allocate hyp stack guard page\n"); + return ret; + } + + /* + * Since the stack grows downwards, map the stack to the page + * at the higher address and leave the lower guard page + * unbacked. + * + * Any valid stack address now has the PAGE_SHIFT bit as 1 + * and addresses corresponding to the guard page have the + * PAGE_SHIFT bit as 0 - this is used for overflow detection. 
+ */ + ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr, + PAGE_HYP); + if (ret) + kvm_err("Cannot map hyp stack\n"); + + *haddr = base + size; + + return ret; +} + /** * create_hyp_io_mappings - Map IO into both kernel and HYP * @phys_addr: The physical start address which gets mapped -- cgit From 16631c42e6ff4f5acf30998fda6c2ea09d1adbfe Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 25 Jul 2023 16:37:16 +0200 Subject: KVM: s390: interrupt: Fix single-stepping into interrupt handlers After single-stepping an instruction that generates an interrupt, GDB ends up on the second instruction of the respective interrupt handler. The reason is that vcpu_pre_run() manually delivers the interrupt, and then __vcpu_run() runs the first handler instruction using the CPUSTAT_P flag. This causes a KVM_SINGLESTEP exit on the second handler instruction. Fix by delaying the KVM_SINGLESTEP exit until after the manual interrupt delivery. Acked-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Signed-off-by: Ilya Leoshkevich Message-ID: <20230725143857.228626-2-iii@linux.ibm.com> Signed-off-by: Claudio Imbrenda Signed-off-by: Janosch Frank --- arch/s390/kvm/interrupt.c | 14 ++++++++++++++ arch/s390/kvm/kvm-s390.c | 4 ++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 9bd0a873f3b1..85e39f472bb4 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1392,6 +1392,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc = 0; + bool delivered = false; unsigned long irq_type; unsigned long irqs; @@ -1465,6 +1466,19 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) WARN_ONCE(1, "Unknown pending irq type %ld", irq_type); clear_bit(irq_type, &li->pending_irqs); } + delivered |= !rc; + } + + /* + * We delivered at least one interrupt and modified the PC. Force a + * singlestep event now. + */ + if (delivered && guestdbg_sstep_enabled(vcpu)) { + struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch; + + debug_exit->addr = vcpu->arch.sie_block->gpsw.addr; + debug_exit->type = KVM_SINGLESTEP; + vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING; } set_intercept_indicators(vcpu); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d1e768bcfe1d..0c6333b108ba 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4611,7 +4611,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) if (!kvm_is_ucontrol(vcpu->kvm)) { rc = kvm_s390_deliver_pending_interrupts(vcpu); - if (rc) + if (rc || guestdbg_exit_pending(vcpu)) return rc; } @@ -4738,7 +4738,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) do { rc = vcpu_pre_run(vcpu); - if (rc) + if (rc || guestdbg_exit_pending(vcpu)) break; kvm_vcpu_srcu_read_unlock(vcpu); -- cgit From 74a439ef7b67d89d29ec7485c3aeca20a64449c5 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 25 Jul 2023 16:37:17 +0200 Subject: KVM: s390: interrupt: Fix single-stepping into program interrupt handlers Currently, after single-stepping an instruction that generates a specification exception, GDB ends up on the instruction immediately following it. The reason is that vcpu_post_run() injects the interrupt and sets KVM_GUESTDBG_EXIT_PENDING, causing a KVM_SINGLESTEP exit. The interrupt is not delivered, however, therefore userspace sees the address of the next instruction. 
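For orientation, the run loop in question has roughly this shape after the previous patch (a condensed sketch; run_sie_once() is an illustrative stand-in for the SIE entry/exit and vcpu_post_run() handling):

static int vcpu_run_sketch(struct kvm_vcpu *vcpu)
{
        int rc;

        do {
                /* Delivers pending irqs, may set KVM_GUESTDBG_EXIT_PENDING. */
                rc = vcpu_pre_run(vcpu);
                if (rc || guestdbg_exit_pending(vcpu))
                        break;

                rc = run_sie_once(vcpu);
        } while (rc == 0);

        return rc;
}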
Fix by letting the __vcpu_run() loop go into the next iteration, where vcpu_pre_run() delivers the interrupt and sets KVM_GUESTDBG_EXIT_PENDING. Reviewed-by: David Hildenbrand Signed-off-by: Ilya Leoshkevich Reviewed-by: Claudio Imbrenda Message-ID: <20230725143857.228626-3-iii@linux.ibm.com> Signed-off-by: Claudio Imbrenda Signed-off-by: Janosch Frank --- arch/s390/kvm/intercept.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 954d39adf85c..e54496740859 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -228,6 +228,21 @@ static int handle_itdb(struct kvm_vcpu *vcpu) #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER) +static bool should_handle_per_event(const struct kvm_vcpu *vcpu) +{ + if (!guestdbg_enabled(vcpu) || !per_event(vcpu)) + return false; + if (guestdbg_sstep_enabled(vcpu) && + vcpu->arch.sie_block->iprcc != PGM_PER) { + /* + * __vcpu_run() will exit after delivering the concurrently + * indicated condition. + */ + return false; + } + return true; +} + static int handle_prog(struct kvm_vcpu *vcpu) { psw_t psw; @@ -242,7 +257,7 @@ static int handle_prog(struct kvm_vcpu *vcpu) if (kvm_s390_pv_cpu_is_protected(vcpu)) return -EOPNOTSUPP; - if (guestdbg_enabled(vcpu) && per_event(vcpu)) { + if (should_handle_per_event(vcpu)) { rc = kvm_s390_handle_per_event(vcpu); if (rc) return rc; -- cgit From ba853a4e1c7addc631df55535bf0b04c62dc79d8 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 25 Jul 2023 16:37:18 +0200 Subject: KVM: s390: interrupt: Fix single-stepping kernel-emulated instructions Single-stepping a kernel-emulated instruction that generates an interrupt causes GDB to land on the instruction following it instead of the respective interrupt handler. The reason is that kvm_handle_sie_intercept(), after injecting the interrupt, also processes the PER event and arranges a KVM_SINGLESTEP exit. The interrupt is not yet delivered, however, so the userspace sees the next instruction. Fix by avoiding the KVM_SINGLESTEP exit when there is a pending interrupt. The next __vcpu_run() loop iteration will arrange a KVM_SINGLESTEP exit after delivering the interrupt. Reviewed-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Signed-off-by: Ilya Leoshkevich Message-ID: <20230725143857.228626-4-iii@linux.ibm.com> Signed-off-by: Claudio Imbrenda Signed-off-by: Janosch Frank --- arch/s390/kvm/intercept.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index e54496740859..db222c749e5e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -583,6 +583,19 @@ static int handle_pv_notification(struct kvm_vcpu *vcpu) return handle_instruction(vcpu); } +static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc) +{ + /* Process PER, also if the instruction is processed in user space. */ + if (!(vcpu->arch.sie_block->icptstatus & 0x02)) + return false; + if (rc != 0 && rc != -EOPNOTSUPP) + return false; + if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs) + /* __vcpu_run() will exit after delivering the interrupt. 
*/ + return false; + return true; +} + int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) { int rc, per_rc = 0; @@ -645,9 +658,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } - /* process PER, also if the instruction is processed in user space */ - if (vcpu->arch.sie_block->icptstatus & 0x02 && - (!rc || rc == -EOPNOTSUPP)) + if (should_handle_per_ifetch(vcpu, rc)) per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu); return per_rc ? per_rc : rc; } -- cgit From 1ad1fa820e6424ae75d3d9f59774e40c9c7ec1e5 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 25 Jul 2023 16:37:19 +0200 Subject: KVM: s390: interrupt: Fix single-stepping userspace-emulated instructions Single-stepping a userspace-emulated instruction that generates an interrupt causes GDB to land on the instruction following it instead of the respective interrupt handler. The reason is that after arranging a KVM_EXIT_S390_SIEIC exit, kvm_handle_sie_intercept() calls kvm_s390_handle_per_ifetch_icpt(), which sets KVM_GUESTDBG_EXIT_PENDING. This bit, however, is not processed immediately, but rather persists until the next ioctl(), causing a spurious single-step exit. Fix by clearing this bit in ioctl(). Reviewed-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Signed-off-by: Ilya Leoshkevich Message-ID: <20230725143857.228626-5-iii@linux.ibm.com> Signed-off-by: Claudio Imbrenda Signed-off-by: Janosch Frank --- arch/s390/kvm/kvm-s390.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c6333b108ba..e6511608280c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5383,6 +5383,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp, { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; + int rc; switch (ioctl) { case KVM_S390_IRQ: { @@ -5390,7 +5391,8 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp, if (copy_from_user(&s390irq, argp, sizeof(s390irq))) return -EFAULT; - return kvm_s390_inject_vcpu(vcpu, &s390irq); + rc = kvm_s390_inject_vcpu(vcpu, &s390irq); + break; } case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; @@ -5400,10 +5402,25 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp, return -EFAULT; if (s390int_to_s390irq(&s390int, &s390irq)) return -EINVAL; - return kvm_s390_inject_vcpu(vcpu, &s390irq); + rc = kvm_s390_inject_vcpu(vcpu, &s390irq); + break; } + default: + rc = -ENOIOCTLCMD; + break; } - return -ENOIOCTLCMD; + + /* + * To simplify single stepping of userspace-emulated instructions, + * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see + * should_handle_per_ifetch()). However, if userspace emulation injects + * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens + * after (and not before) the interrupt delivery. + */ + if (!rc) + vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; + + return rc; } static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu, -- cgit From fdbeb55ebdf1d83e610303f7e5c50a4d6904b7fa Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 25 Jul 2023 16:37:20 +0200 Subject: KVM: s390: interrupt: Fix single-stepping keyless mode exits kvm_s390_skey_check_enable() does not emulate any instructions, rather, it clears CPUSTAT_KSS and arranges the instruction that caused the exit (e.g., ISKE, SSKE, RRBE or LPSWE with a keyed PSW) to run again. Therefore, skip the PER check and let the instruction execution happen. 
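In the intercept handler that becomes an early return (a sketch with the surrounding cases elided; handle_per_if_needed() is a made-up stand-in for the PER processing at the end of kvm_handle_sie_intercept()):

static int handle_intercept_sketch(struct kvm_vcpu *vcpu, int icptcode)
{
        switch (icptcode) {
        case ICPT_KSS:
                /* Instruction will be redriven, skip the PER check. */
                return kvm_s390_skey_check_enable(vcpu);
        default:
                break;
        }

        /* All other intercepts fall through to PER handling. */
        return handle_per_if_needed(vcpu);
}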
Otherwise, a debugger will see two single-step events on the same instruction. Reviewed-by: Christian Borntraeger Reviewed-by: David Hildenbrand Signed-off-by: Ilya Leoshkevich Message-ID: <20230725143857.228626-6-iii@linux.ibm.com> Signed-off-by: Claudio Imbrenda Signed-off-by: Janosch Frank --- arch/s390/kvm/intercept.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index db222c749e5e..9f64f27f086e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -630,8 +630,8 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) rc = handle_partial_execution(vcpu); break; case ICPT_KSS: - rc = kvm_s390_skey_check_enable(vcpu); - break; + /* Instruction will be redriven, skip the PER check. */ + return kvm_s390_skey_check_enable(vcpu); case ICPT_MCHKREQ: case ICPT_INT_ENABLE: /* -- cgit From 642dbc0312d67781dabf97a70b43810165f21527 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 25 Jul 2023 16:37:21 +0200 Subject: KVM: s390: selftests: Add selftest for single-stepping Test different variations of single-stepping into interrupts: - SVC and PGM interrupts; - Interrupts generated by ISKE; - Interrupts generated by instructions emulated by KVM; - Interrupts generated by instructions emulated by userspace. Reviewed-by: Claudio Imbrenda Signed-off-by: Ilya Leoshkevich Message-ID: <20230725143857.228626-7-iii@linux.ibm.com> Signed-off-by: Claudio Imbrenda [frankja@de.igm.com: s/ASSERT_EQ/TEST_ASSERT_EQ/ because function was renamed in the selftest printf series] Signed-off-by: Janosch Frank --- tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/s390x/debug_test.c | 160 +++++++++++++++++++++++++ 2 files changed, 161 insertions(+) create mode 100644 tools/testing/selftests/kvm/s390x/debug_test.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 77026907968f..6092ccfc49ac 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -169,6 +169,7 @@ TEST_GEN_PROGS_s390x += s390x/resets TEST_GEN_PROGS_s390x += s390x/sync_regs_test TEST_GEN_PROGS_s390x += s390x/tprot TEST_GEN_PROGS_s390x += s390x/cmma_test +TEST_GEN_PROGS_s390x += s390x/debug_test TEST_GEN_PROGS_s390x += demand_paging_test TEST_GEN_PROGS_s390x += dirty_log_test TEST_GEN_PROGS_s390x += guest_print_test diff --git a/tools/testing/selftests/kvm/s390x/debug_test.c b/tools/testing/selftests/kvm/s390x/debug_test.c new file mode 100644 index 000000000000..84313fb27529 --- /dev/null +++ b/tools/testing/selftests/kvm/s390x/debug_test.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Test KVM debugging features. */ +#include "kvm_util.h" +#include "test_util.h" + +#include + +#define __LC_SVC_NEW_PSW 0x1c0 +#define __LC_PGM_NEW_PSW 0x1d0 +#define ICPT_INSTRUCTION 0x04 +#define IPA0_DIAG 0x8300 +#define PGM_SPECIFICATION 0x06 + +/* Common code for testing single-stepping interruptions. 
*/ +extern char int_handler[]; +asm("int_handler:\n" + "j .\n"); + +static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, + size_t new_psw_off, uint64_t *new_psw) +{ + struct kvm_guest_debug debug = {}; + struct kvm_regs regs; + struct kvm_vm *vm; + char *lowcore; + + vm = vm_create_with_one_vcpu(vcpu, guest_code); + lowcore = addr_gpa2hva(vm, 0); + new_psw[0] = (*vcpu)->run->psw_mask; + new_psw[1] = (uint64_t)int_handler; + memcpy(lowcore + new_psw_off, new_psw, 16); + vcpu_regs_get(*vcpu, ®s); + regs.gprs[2] = -1; + vcpu_regs_set(*vcpu, ®s); + debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP; + vcpu_guest_debug_set(*vcpu, &debug); + vcpu_run(*vcpu); + + return vm; +} + +static void test_step_int(void *guest_code, size_t new_psw_off) +{ + struct kvm_vcpu *vcpu; + uint64_t new_psw[2]; + struct kvm_vm *vm; + + vm = test_step_int_1(&vcpu, guest_code, new_psw_off, new_psw); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG); + TEST_ASSERT_EQ(vcpu->run->psw_mask, new_psw[0]); + TEST_ASSERT_EQ(vcpu->run->psw_addr, new_psw[1]); + kvm_vm_free(vm); +} + +/* Test single-stepping "boring" program interruptions. */ +extern char test_step_pgm_guest_code[]; +asm("test_step_pgm_guest_code:\n" + ".insn rr,0x1d00,%r1,%r0 /* dr %r1,%r0 */\n" + "j .\n"); + +static void test_step_pgm(void) +{ + test_step_int(test_step_pgm_guest_code, __LC_PGM_NEW_PSW); +} + +/* + * Test single-stepping program interruptions caused by DIAG. + * Userspace emulation must not interfere with single-stepping. + */ +extern char test_step_pgm_diag_guest_code[]; +asm("test_step_pgm_diag_guest_code:\n" + "diag %r0,%r0,0\n" + "j .\n"); + +static void test_step_pgm_diag(void) +{ + struct kvm_s390_irq irq = { + .type = KVM_S390_PROGRAM_INT, + .u.pgm.code = PGM_SPECIFICATION, + }; + struct kvm_vcpu *vcpu; + uint64_t new_psw[2]; + struct kvm_vm *vm; + + vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code, + __LC_PGM_NEW_PSW, new_psw); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC); + TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INSTRUCTION); + TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa & 0xff00, IPA0_DIAG); + vcpu_ioctl(vcpu, KVM_S390_IRQ, &irq); + vcpu_run(vcpu); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG); + TEST_ASSERT_EQ(vcpu->run->psw_mask, new_psw[0]); + TEST_ASSERT_EQ(vcpu->run->psw_addr, new_psw[1]); + kvm_vm_free(vm); +} + +/* + * Test single-stepping program interruptions caused by ISKE. + * CPUSTAT_KSS handling must not interfere with single-stepping. + */ +extern char test_step_pgm_iske_guest_code[]; +asm("test_step_pgm_iske_guest_code:\n" + "iske %r2,%r2\n" + "j .\n"); + +static void test_step_pgm_iske(void) +{ + test_step_int(test_step_pgm_iske_guest_code, __LC_PGM_NEW_PSW); +} + +/* + * Test single-stepping program interruptions caused by LCTL. + * KVM emulation must not interfere with single-stepping. + */ +extern char test_step_pgm_lctl_guest_code[]; +asm("test_step_pgm_lctl_guest_code:\n" + "lctl %c0,%c0,1\n" + "j .\n"); + +static void test_step_pgm_lctl(void) +{ + test_step_int(test_step_pgm_lctl_guest_code, __LC_PGM_NEW_PSW); +} + +/* Test single-stepping supervisor-call interruptions. */ +extern char test_step_svc_guest_code[]; +asm("test_step_svc_guest_code:\n" + "svc 0\n" + "j .\n"); + +static void test_step_svc(void) +{ + test_step_int(test_step_svc_guest_code, __LC_SVC_NEW_PSW); +} + +/* Run all tests above. 
*/ +static struct testdef { + const char *name; + void (*test)(void); +} testlist[] = { + { "single-step pgm", test_step_pgm }, + { "single-step pgm caused by diag", test_step_pgm_diag }, + { "single-step pgm caused by iske", test_step_pgm_iske }, + { "single-step pgm caused by lctl", test_step_pgm_lctl }, + { "single-step svc", test_step_svc }, +}; + +int main(int argc, char *argv[]) +{ + int idx; + + ksft_print_header(); + ksft_set_plan(ARRAY_SIZE(testlist)); + for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) { + testlist[idx].test(); + ksft_test_result_pass("%s\n", testlist[idx].name); + } + ksft_finished(); +} -- cgit From b1e428615f154c87a94267b67e819644b50948f8 Mon Sep 17 00:00:00 2001 From: Viktor Mihajlovski Date: Tue, 15 Aug 2023 17:14:12 +0200 Subject: KVM: s390: pv: relax WARN_ONCE condition for destroy fast Destroy configuration fast may return with RC 0x104 if there are still bound APQNs in the configuration. The final cleanup will occur with the standard destroy configuration UVC as at this point in time all APQNs have been reset and thus unbound. Therefore, don't warn if RC 0x104 is reported. Signed-off-by: Viktor Mihajlovski Reviewed-by: Claudio Imbrenda Reviewed-by: Janosch Frank Reviewed-by: Steffen Eiden Reviewed-by: Michael Mueller Link: https://lore.kernel.org/r/20230815151415.379760-2-seiden@linux.ibm.com Signed-off-by: Janosch Frank Message-ID: <20230815151415.379760-2-seiden@linux.ibm.com> --- arch/s390/kvm/pv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 856140e9942e..beeebc11b1a1 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -285,7 +285,8 @@ static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc) WRITE_ONCE(kvm->arch.gmap->guest_handle, 0); KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x", uvcb.header.rc, uvcb.header.rrc); - WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x", + WARN_ONCE(cc && uvcb.header.rc != 0x104, + "protvirt destroy vm fast failed handle %llx rc %x rrc %x", kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc); /* Intended memory leak on "impossible" error */ if (!cc) -- cgit From 59a881402cc89788652fa2c2ce7421d14f131c34 Mon Sep 17 00:00:00 2001 From: Steffen Eiden Date: Tue, 15 Aug 2023 17:14:13 +0200 Subject: s390/uv: UV feature check utility Introduces a function to check the existence of an UV feature. Refactor feature bit checks to use the new function. 
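The idea in standalone form (plain LSB-first bit numbering here, whereas the real helper uses s390's inverted test_bit_inv() numbering):

#include <stdbool.h>
#include <stdio.h>

static unsigned long long feature_indications = 0x3;    /* bits 0 and 1 */

static bool has_feature(unsigned int bit)
{
        /* Out-of-range bits read as "absent", never as garbage. */
        if (bit >= sizeof(feature_indications) * 8)
                return false;
        return feature_indications & (1ULL << bit);
}

int main(void)
{
        printf("feat 1:  %d\n", has_feature(1));        /* 1 */
        printf("feat 63: %d\n", has_feature(63));       /* 0 */
        printf("feat 99: %d\n", has_feature(99));       /* 0, bounds check */
        return 0;
}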
Signed-off-by: Steffen Eiden Reviewed-by: Claudio Imbrenda Reviewed-by: Janosch Frank Signed-off-by: Janosch Frank Reviewed-by: Michael Mueller Link: https://lore.kernel.org/r/20230815151415.379760-3-seiden@linux.ibm.com Message-Id: <20230815151415.379760-3-seiden@linux.ibm.com> --- arch/s390/include/asm/uv.h | 7 +++++++ arch/s390/kernel/uv.c | 2 +- arch/s390/kvm/kvm-s390.c | 2 +- arch/s390/mm/fault.c | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index d2cd42bb2c26..823adfff7315 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -397,6 +397,13 @@ struct uv_info { extern struct uv_info uv_info; +static inline bool uv_has_feature(u8 feature_bit) +{ + if (feature_bit >= sizeof(uv_info.uv_feature_indications) * 8) + return false; + return test_bit_inv(feature_bit, &uv_info.uv_feature_indications); +} + #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST extern int prot_virt_guest; diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index b771f1b4cdd1..fc07bc39e698 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -258,7 +258,7 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str * shared page from a different protected VM will automatically also * transfer its ownership. */ - if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications)) + if (uv_has_feature(BIT_UV_FEAT_MISC)) return false; if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED) return false; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index e6511608280c..813cc3d59c90 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2406,7 +2406,7 @@ static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) struct kvm_vcpu *vcpu; /* Disable the GISA if the ultravisor does not support AIV. */ - if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications)) + if (!uv_has_feature(BIT_UV_FEAT_AIV)) kvm_s390_gisa_disable(kvm); kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index dbe8394234e2..390819d03215 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -825,7 +825,7 @@ void do_secure_storage_access(struct pt_regs *regs) * reliable without the misc UV feature so we need to check * for that as well. */ - if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) && + if (uv_has_feature(BIT_UV_FEAT_MISC) && !test_bit_inv(61, ®s->int_parm_long)) { /* * When this happens, userspace did something that it -- cgit From 19c654bf05ae33ad0a2a9c039d5fe7d411a8bb06 Mon Sep 17 00:00:00 2001 From: Steffen Eiden Date: Tue, 15 Aug 2023 17:14:14 +0200 Subject: KVM: s390: Add UV feature negotiation Add a uv_feature list for pv-guests to the KVM cpu-model. The feature bits 'AP-interpretation for secure guests' and 'AP-interrupt for secure guests' are available. 
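From the VMM side, negotiation might look like the sketch below (it assumes the uapi additions in the patch that follows; vm_fd is an open VM file descriptor, and error handling is minimal):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_uv_ap_feats(int vm_fd)
{
        struct kvm_s390_vm_cpu_uv_feat feat = {};
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_CPU_MODEL,
                .attr = KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST,
                .addr = (__u64)(unsigned long)&feat,
        };

        /* Query what the ultravisor/KVM can offer... */
        if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
                return -1;
        if (!feat.ap)
                return -1;      /* no AP interpretation available */

        /* ...then enable the subset, before any vCPU is created. */
        attr.attr = KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST;
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}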
Signed-off-by: Steffen Eiden Reviewed-by: Janosch Frank Reviewed-by: Michael Mueller Signed-off-by: Janosch Frank Link: https://lore.kernel.org/r/20230815151415.379760-4-seiden@linux.ibm.com Message-Id: <20230815151415.379760-4-seiden@linux.ibm.com> --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/include/uapi/asm/kvm.h | 16 +++++++++ arch/s390/kvm/kvm-s390.c | 73 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 91bfecb91321..427f9528a7b6 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -817,6 +817,8 @@ struct kvm_s390_cpu_model { __u64 *fac_list; u64 cpuid; unsigned short ibc; + /* subset of available UV-features for pv-guests enabled by user space */ + struct kvm_s390_vm_cpu_uv_feat uv_feat_guest; }; typedef int (*crypto_hook)(struct kvm_vcpu *vcpu); diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index a73cf01a1606..abe926d43cbe 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -159,6 +159,22 @@ struct kvm_s390_vm_cpu_subfunc { __u8 reserved[1728]; }; +#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6 +#define KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST 7 + +#define KVM_S390_VM_CPU_UV_FEAT_NR_BITS 64 +struct kvm_s390_vm_cpu_uv_feat { + union { + struct { + __u64 : 4; + __u64 ap : 1; /* bit 4 */ + __u64 ap_intr : 1; /* bit 5 */ + __u64 : 58; + }; + __u64 feat; + }; +}; + /* kvm attributes for crypto */ #define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 813cc3d59c90..b3f17e014cab 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1531,6 +1531,39 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm, return 0; } +#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK \ +( \ + ((struct kvm_s390_vm_cpu_uv_feat){ \ + .ap = 1, \ + .ap_intr = 1, \ + }) \ + .feat \ +) + +static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr; + unsigned long data, filter; + + filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK; + if (get_user(data, &ptr->feat)) + return -EFAULT; + if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS)) + return -EINVAL; + + mutex_lock(&kvm->lock); + if (kvm->created_vcpus) { + mutex_unlock(&kvm->lock); + return -EBUSY; + } + kvm->arch.model.uv_feat_guest.feat = data; + mutex_unlock(&kvm->lock); + + VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); + + return 0; +} + static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) { int ret = -ENXIO; @@ -1545,6 +1578,9 @@ static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC: ret = kvm_s390_set_processor_subfunc(kvm, attr); break; + case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST: + ret = kvm_s390_set_uv_feat(kvm, attr); + break; } return ret; } @@ -1777,6 +1813,33 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm, return 0; } +static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; + unsigned long feat = kvm->arch.model.uv_feat_guest.feat; + + if (put_user(feat, &dst->feat)) + return -EFAULT; + VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); + 
+ return 0; +} + +static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; + unsigned long feat; + + BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications)); + + feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK; + if (put_user(feat, &dst->feat)) + return -EFAULT; + VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); + + return 0; +} + static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) { int ret = -ENXIO; @@ -1800,6 +1863,12 @@ static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) case KVM_S390_VM_CPU_MACHINE_SUBFUNC: ret = kvm_s390_get_machine_subfunc(kvm, attr); break; + case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST: + ret = kvm_s390_get_processor_uv_feat(kvm, attr); + break; + case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST: + ret = kvm_s390_get_machine_uv_feat(kvm, attr); + break; } return ret; } @@ -1952,6 +2021,8 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) case KVM_S390_VM_CPU_MACHINE_FEAT: case KVM_S390_VM_CPU_MACHINE_SUBFUNC: case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC: + case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST: + case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST: ret = 0; break; default: @@ -3296,6 +3367,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); kvm->arch.model.ibc = sclp.ibc & 0x0fff; + kvm->arch.model.uv_feat_guest.feat = 0; + kvm_s390_crypto_init(kvm); if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) { -- cgit From 899e2206f46aece42d8194c350bc1de71344dbc7 Mon Sep 17 00:00:00 2001 From: Steffen Eiden Date: Tue, 15 Aug 2023 17:14:15 +0200 Subject: KVM: s390: pv: Allow AP-instructions for pv-guests Introduces new feature bits and enablement flags for AP and AP IRQ support. 
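The flags word added to uv_cb_cgc below pairs named bits with a raw view for logging. A standalone model of that union pattern (bit positions differ across ABIs — s390 lays bitfields out big-endian — so this shows the shape, not the layout):

#include <stdint.h>
#include <stdio.h>

union cgc_flags {
        struct {
                uint16_t          : 14;
                uint16_t intr     : 1;
                uint16_t allow    : 1;
        };
        uint16_t raw;
};

int main(void)
{
        union cgc_flags f = { .raw = 0 };

        f.allow = 1;
        f.intr = 1;
        printf("flags %04x\n", f.raw);  /* whole word in one shot */
        return 0;
}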
Signed-off-by: Steffen Eiden Reviewed-by: Janosch Frank Reviewed-by: Michael Mueller Signed-off-by: Janosch Frank Link: https://lore.kernel.org/r/20230815151415.379760-5-seiden@linux.ibm.com Message-Id: <20230815151415.379760-5-seiden@linux.ibm.com> --- arch/s390/include/asm/uv.h | 12 +++++++++++- arch/s390/kvm/pv.c | 6 ++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index 823adfff7315..0e7bd3873907 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -99,6 +99,8 @@ enum uv_cmds_inst { enum uv_feat_ind { BIT_UV_FEAT_MISC = 0, BIT_UV_FEAT_AIV = 1, + BIT_UV_FEAT_AP = 4, + BIT_UV_FEAT_AP_INTR = 5, }; struct uv_cb_header { @@ -159,7 +161,15 @@ struct uv_cb_cgc { u64 guest_handle; u64 conf_base_stor_origin; u64 conf_virt_stor_origin; - u64 reserved30; + u8 reserved30[6]; + union { + struct { + u16 : 14; + u16 ap_instr_intr : 1; + u16 ap_allow_instr : 1; + }; + u16 raw; + } flags; u64 guest_stor_origin; u64 guest_stor_len; u64 guest_sca; diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index beeebc11b1a1..22fb482f042d 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -572,12 +572,14 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) uvcb.conf_base_stor_origin = virt_to_phys((void *)kvm->arch.pv.stor_base); uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var; + uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap; + uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr; cc = uv_call_sched(0, (u64)&uvcb); *rc = uvcb.header.rc; *rrc = uvcb.header.rrc; - KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x", - uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc); + KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x", + uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc, uvcb.flags.raw); /* Outputs */ kvm->arch.pv.handle = uvcb.guest_handle; -- cgit From 50011c2a245792993f2756e5b5b571512bfa409e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 24 Aug 2023 18:45:32 -0700 Subject: KVM: VMX: Refresh available regs and IDT vectoring info before NMI handling Reset the mask of available "registers" and refresh the IDT vectoring info snapshot in vmx_vcpu_enter_exit(), before KVM potentially handles a an NMI VM-Exit. One of the "registers" that KVM VMX lazily loads is the vmcs.VM_EXIT_INTR_INFO field, which is holds the vector+type on "exception or NMI" VM-Exits, i.e. is needed to identify NMIs. Clearing the available registers bitmask after handling NMIs results in KVM querying info from the last VM-Exit that read vmcs.VM_EXIT_INTR_INFO, and leads to both missed NMIs and spurious NMIs in the host. Opportunistically grab vmcs.IDT_VECTORING_INFO_FIELD early in the VM-Exit path too, e.g. to guard against similar consumption of stale data. The field is read on every "normal" VM-Exit, and there's no point in delaying the inevitable. 
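A standalone model of the lazy cache involved (fake field and values; the point is that the availability mask must be cleared before any consumer, such as the NMI path, reads a cached field):

#include <stdint.h>
#include <stdio.h>

#define REG_EXIT_INTR_INFO_BIT  (1u << 0)

static uint32_t regs_avail;             /* set bit => cache is current */
static uint32_t cached_intr_info;

static uint32_t vmcs_read_sketch(void)  /* stand-in for vmcs_read32() */
{
        return 0x80000202;              /* pretend: NMI exit info */
}

static uint32_t get_intr_info(void)
{
        if (!(regs_avail & REG_EXIT_INTR_INFO_BIT)) {
                cached_intr_info = vmcs_read_sketch();
                regs_avail |= REG_EXIT_INTR_INFO_BIT;
        }
        return cached_intr_info;        /* stale if mask wasn't reset! */
}

static void vm_exit(void)
{
        regs_avail = 0;                 /* the fix: invalidate up front */
        printf("intr_info=%x\n", get_intr_info());
}

int main(void)
{
        vm_exit();
        vm_exit();
        return 0;
}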
Reported-by: Like Xu Fixes: 11df586d774f ("KVM: VMX: Handle NMI VM-Exits in noinstr region") Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230825014532.2846714-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b0093b8a72d2..7625d74c056a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7209,13 +7209,20 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, flags); vcpu->arch.cr2 = native_read_cr2(); + vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET; + + vmx->idt_vectoring_info = 0; vmx_enable_fb_clear(vmx); - if (unlikely(vmx->fail)) + if (unlikely(vmx->fail)) { vmx->exit_reason.full = 0xdead; - else - vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); + goto out; + } + + vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); + if (likely(!vmx->exit_reason.failed_vmentry)) + vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI && is_nmi(vmx_get_intr_info(vcpu))) { @@ -7224,6 +7231,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, kvm_after_interrupt(vcpu); } +out: guest_state_exit_irqoff(); } @@ -7345,8 +7353,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) loadsegment(es, __USER_DS); #endif - vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET; - pt_guest_exit(vmx); kvm_load_host_xsave_state(vcpu); @@ -7363,17 +7369,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->nested.nested_run_pending = 0; } - vmx->idt_vectoring_info = 0; - if (unlikely(vmx->fail)) return EXIT_FASTPATH_NONE; if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) kvm_machine_check(); - if (likely(!vmx->exit_reason.failed_vmentry)) - vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); - trace_kvm_exit(vcpu, KVM_ISA_VMX); if (unlikely(vmx->exit_reason.failed_vmentry)) -- cgit From 91303f800e76517d1e96d47dec290c5d5dbf1230 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 27 Jun 2023 12:26:39 +0800 Subject: KVM: x86/mmu: Move the lockdep_assert of mmu_lock to inside clear_dirty_pt_masked() Move the lockdep_assert_held_write(&kvm->mmu_lock) from the only one caller kvm_tdp_mmu_clear_dirty_pt_masked() to inside clear_dirty_pt_masked(). This change makes it more obvious why it's safe for clear_dirty_pt_masked() to use the non-atomic (for non-volatile SPTEs) tdp_mmu_clear_spte_bits() helper. for_each_tdp_mmu_root() does its own lockdep, so the only "loss" in lockdep coverage is if the list is completely empty. 
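The general pattern, as a sketch (clear_bits_nonatomic() is a made-up helper, not the TDP MMU code): the assertion lives in the function that depends on the lock, so every current and future call site is covered.

static void clear_bits_nonatomic(struct kvm *kvm, u64 *sptep, u64 mask)
{
        /* Non-atomic RMW is only safe against writers under mmu_lock. */
        lockdep_assert_held_write(&kvm->mmu_lock);

        *sptep &= ~mask;
}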
Suggested-by: Sean Christopherson Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230627042639.12636-1-likexu@tencent.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 6250bd3d20c1..639c5f2c88f8 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1600,6 +1600,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, shadow_dirty_mask; struct tdp_iter iter; + lockdep_assert_held_write(&kvm->mmu_lock); + rcu_read_lock(); tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), @@ -1646,7 +1648,6 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, { struct kvm_mmu_page *root; - lockdep_assert_held_write(&kvm->mmu_lock); for_each_tdp_mmu_root(kvm, root, slot->as_id) clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); } -- cgit From d09f711233a43869725abc44102117d2a8fae6fe Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 15:37:11 -0700 Subject: KVM: x86/mmu: Guard against collision with KVM-defined PFERR_IMPLICIT_ACCESS Add an assertion in kvm_mmu_page_fault() to ensure the error code provided by hardware doesn't conflict with KVM's software-defined IMPLICIT_ACCESS flag. In the unlikely scenario that future hardware starts using bit 48 for a hardware-defined flag, preserving the bit could result in KVM incorrectly interpreting the unknown flag as KVM's IMPLICIT_ACCESS flag. WARN so that any such conflict can be surfaced to KVM developers and resolved, but otherwise ignore the bit as KVM can't possibly rely on a flag it knows nothing about. Fixes: 4f4aa80e3b88 ("KVM: X86: Handle implicit supervisor access with SMAP") Acked-by: Kai Huang Reviewed-by: Paolo Bonzini Link: https://lore.kernel.org/r/20230721223711.2334426-1-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 276157f8496c..862604b6e6a3 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5724,6 +5724,17 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err int r, emulation_type = EMULTYPE_PF; bool direct = vcpu->arch.mmu->root_role.direct; + /* + * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP + * checks when emulating instructions that triggers implicit access. + * WARN if hardware generates a fault with an error code that collides + * with the KVM-defined value. Clear the flag and continue on, i.e. + * don't terminate the VM, as KVM can't possibly be relying on a flag + * that KVM doesn't know about. + */ + if (WARN_ON_ONCE(error_code & PFERR_IMPLICIT_ACCESS)) + error_code &= ~PFERR_IMPLICIT_ACCESS; + if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) return RET_PF_RETRY; -- cgit From a98b889492a61a0220f26d8692ec744f1830a172 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:11 -0700 Subject: KVM: x86/mmu: Delete pgprintk() and all its usage Delete KVM's pgprintk() and all its usage, as the code is very prone to bitrot due to being buried behind MMU_DEBUG, and the functionality has been rendered almost entirely obsolete by the tracepoints KVM has gained over the years. 
And for the situations where the information provided by KVM's tracepoints is insufficient, pgprintk() rarely fills in the gaps, and is almost always far too noisy, i.e. developers end up implementing custom prints anyways. Link: https://lore.kernel.org/r/20230729004722.1056172-2-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 17 ----------------- arch/x86/kvm/mmu/mmu_internal.h | 2 -- arch/x86/kvm/mmu/paging_tmpl.h | 7 ------- arch/x86/kvm/mmu/spte.c | 2 -- 4 files changed, 28 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 862604b6e6a3..63a8b82120fe 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2771,12 +2771,9 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) LIST_HEAD(invalid_list); int r; - pgprintk("%s: looking for gfn %llx\n", __func__, gfn); r = 0; write_lock(&kvm->mmu_lock); for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) { - pgprintk("%s: gfn %llx role %x\n", __func__, gfn, - sp->role.word); r = 1; kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); } @@ -2934,9 +2931,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, bool prefetch = !fault || fault->prefetch; bool write_fault = fault && fault->write; - pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, - *sptep, write_fault, gfn); - if (unlikely(is_noslot_pfn(pfn))) { vcpu->stat.pf_mmio_spte_created++; mark_mmio_spte(vcpu, sptep, gfn, pte_access); @@ -2956,8 +2950,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, drop_parent_pte(child, sptep); flush = true; } else if (pfn != spte_to_pfn(*sptep)) { - pgprintk("hfn old %llx new %llx\n", - spte_to_pfn(*sptep), pfn); drop_spte(vcpu->kvm, sptep); flush = true; } else @@ -2982,8 +2974,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, if (flush) kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level); - pgprintk("%s: setting spte %llx\n", __func__, *sptep); - if (!was_rmapped) { WARN_ON_ONCE(ret == RET_PF_SPURIOUS); rmap_add(vcpu, slot, sptep, gfn, pte_access); @@ -4439,8 +4429,6 @@ out_unlock: static int nonpaging_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code); - /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */ fault->max_level = PG_LEVEL_2M; return direct_page_fault(vcpu, fault); @@ -5616,9 +5604,6 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, { unsigned offset, pte_size, misaligned; - pgprintk("misaligned: gpa %llx bytes %d role %x\n", - gpa, bytes, sp->role.word); - offset = offset_in_page(gpa); pte_size = sp->role.has_4_byte_gpte ? 4 : 8; @@ -5684,8 +5669,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) return; - pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); - write_lock(&vcpu->kvm->mmu_lock); gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 86cb83bb3480..2d6bbf35520f 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -11,11 +11,9 @@ #ifdef MMU_DEBUG extern bool dbg; -#define pgprintk(x...) do { if (dbg) printk(x); } while (0) #define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0) #define MMU_WARN_ON(x) WARN_ON(x) #else -#define pgprintk(x...) 
do { } while (0) #define rmap_printk(x...) do { } while (0) #define MMU_WARN_ON(x) do { } while (0) #endif diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 0662e0278e70..7a97f769a7cb 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -456,9 +456,6 @@ retry_walk: goto retry_walk; } - pgprintk("%s: pte %llx pte_access %x pt_access %x\n", - __func__, (u64)pte, walker->pte_access, - walker->pt_access[walker->level - 1]); return 1; error: @@ -529,8 +526,6 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) return false; - pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); - gfn = gpte_to_gfn(gpte); pte_access = sp->role.access & FNAME(gpte_access)(gpte); FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); @@ -758,7 +753,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault struct guest_walker walker; int r; - pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code); WARN_ON_ONCE(fault->is_tdp); /* @@ -773,7 +767,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault * The page is not mapped by the guest. Let the guest handle it. */ if (!r) { - pgprintk("%s: guest page fault\n", __func__); if (!fault->prefetch) kvm_inject_emulated_page_fault(vcpu, &walker.fault); diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index cf2c6426a6fc..438a86bda9f3 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -221,8 +221,6 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * shadow pages and unsync'ing pages is not allowed. */ if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) { - pgprintk("%s: found shadow page for %llx, marking ro\n", - __func__, gfn); wrprot = true; pte_access &= ~ACC_WRITE_MASK; spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); -- cgit From 350c49fdea222193e886ddfa8cc55989853a05f5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:12 -0700 Subject: KVM: x86/mmu: Delete rmap_printk() and all its usage Delete rmap_printk() so that MMU_WARN_ON() and MMU_DEBUG can be morphed into something that can be regularly enabled for debug kernels. The information provided by rmap_printk() isn't all that useful now that the rmap and unsync code is mature, as the prints are simultaneously too verbose (_lots_ of messages) and yet not verbose enough to be helpful for debug (most instances print just the SPTE pointer/value, which is rarely sufficient to root cause anything but trivial bugs). Alternatively, rmap_printk() could be reworked into tracepoints, but it's not clear there is a real need as rmap bugs rarely escape initial development, and when bugs do escape to production, they are often edge cases and/or reside in code that isn't directly related to the rmaps. In other words, the problems with rmap_printk() being unhelpful also apply to tracepoints. And deleting rmap_printk() doesn't preclude adding tracepoints in the future.
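For reference, a conversion to tracepoints would look roughly like the hypothetical definition below; no such tracepoint exists in KVM, it is sketched only to illustrate the alternative being set aside:

	/* Hypothetical tracepoint, name and fields invented for illustration. */
	TRACE_EVENT(kvm_mmu_rmap_add,
		TP_PROTO(u64 *sptep, u64 spte),
		TP_ARGS(sptep, spte),
		TP_STRUCT__entry(
			__field(void *, sptep)
			__field(u64, spte)
		),
		TP_fast_assign(
			__entry->sptep = sptep;
			__entry->spte = spte;
		),
		TP_printk("sptep %p spte 0x%llx", __entry->sptep, __entry->spte)
	);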
Link: https://lore.kernel.org/r/20230729004722.1056172-3-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 12 ------------ arch/x86/kvm/mmu/mmu_internal.h | 2 -- 2 files changed, 14 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 63a8b82120fe..d8c060071c2b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -937,10 +937,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, int count = 0; if (!rmap_head->val) { - rmap_printk("%p %llx 0->1\n", spte, *spte); rmap_head->val = (unsigned long)spte; } else if (!(rmap_head->val & 1)) { - rmap_printk("%p %llx 1->many\n", spte, *spte); desc = kvm_mmu_memory_cache_alloc(cache); desc->sptes[0] = (u64 *)rmap_head->val; desc->sptes[1] = spte; @@ -949,7 +947,6 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, rmap_head->val = (unsigned long)desc | 1; ++count; } else { - rmap_printk("%p %llx many->many\n", spte, *spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); count = desc->tail_count + desc->spte_count; @@ -1014,14 +1011,12 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) pr_err("%s: %p 0->BUG\n", __func__, spte); BUG(); } else if (!(rmap_head->val & 1)) { - rmap_printk("%p 1->0\n", spte); if ((u64 *)rmap_head->val != spte) { pr_err("%s: %p 1->BUG\n", __func__, spte); BUG(); } rmap_head->val = 0; } else { - rmap_printk("%p many->many\n", spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); while (desc) { for (i = 0; i < desc->spte_count; ++i) { @@ -1237,8 +1232,6 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect) !(pt_protect && is_mmu_writable_spte(spte))) return false; - rmap_printk("spte %p %llx\n", sptep, *sptep); - if (pt_protect) spte &= ~shadow_mmu_writable_mask; spte = spte & ~PT_WRITABLE_MASK; @@ -1263,8 +1256,6 @@ static bool spte_clear_dirty(u64 *sptep) { u64 spte = *sptep; - rmap_printk("spte %p %llx\n", sptep, *sptep); - MMU_WARN_ON(!spte_ad_enabled(spte)); spte &= ~shadow_dirty_mask; return mmu_spte_update(sptep, spte); @@ -1476,9 +1467,6 @@ static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, restart: for_each_rmap_spte(rmap_head, &iter, sptep) { - rmap_printk("spte %p %llx gfn %llx (%d)\n", - sptep, *sptep, gfn, level); - need_flush = true; if (pte_write(pte)) { diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 2d6bbf35520f..01c906e1d16d 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -11,10 +11,8 @@ #ifdef MMU_DEBUG extern bool dbg; -#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0) #define MMU_WARN_ON(x) WARN_ON(x) #else -#define rmap_printk(x...) do { } while (0) #define MMU_WARN_ON(x) do { } while (0) #endif -- cgit From c4f92cfe021d9263a554f8adcd7c49ef52e485bb Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:13 -0700 Subject: KVM: x86/mmu: Delete the "dbg" module param Delete KVM's "dbg" module param now that its usage in KVM is gone (it used to guard pgprintk() and rmap_printk()). 
Link: https://lore.kernel.org/r/20230729004722.1056172-4-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 5 ----- arch/x86/kvm/mmu/mmu_internal.h | 2 -- 2 files changed, 7 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d8c060071c2b..8e0b7ad64333 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -115,11 +115,6 @@ static int max_huge_page_level __read_mostly; static int tdp_root_level __read_mostly; static int max_tdp_level __read_mostly; -#ifdef MMU_DEBUG -bool dbg = 0; -module_param(dbg, bool, 0644); -#endif - #define PTE_PREFETCH_NUM 8 #include diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 01c906e1d16d..9599e7eb6d85 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -9,8 +9,6 @@ #undef MMU_DEBUG #ifdef MMU_DEBUG -extern bool dbg; - #define MMU_WARN_ON(x) WARN_ON(x) #else #define MMU_WARN_ON(x) do { } while (0) #endif -- cgit From 242a6dd8daddbc718a3ad585ce4dce042c13e208 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:14 -0700 Subject: KVM: x86/mmu: Avoid pointer arithmetic when iterating over SPTEs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the pointer arithmetic used to iterate over SPTEs in is_empty_shadow_page() with more standard integer-based iteration. No functional change intended. Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230729004722.1056172-5-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 8e0b7ad64333..3749ccdef6ce 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1692,15 +1692,15 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) #ifdef MMU_DEBUG static int is_empty_shadow_page(u64 *spt) { - u64 *pos; - u64 *end; + int i; - for (pos = spt, end = pos + SPTE_ENT_PER_PAGE; pos != end; pos++) - if (is_shadow_present_pte(*pos)) { + for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { + if (is_shadow_present_pte(spt[i])) { printk(KERN_ERR "%s: %p %llx\n", __func__, - pos, *pos); + &spt[i], spt[i]); return 0; } + } return 1; } #endif -- cgit From 58da926caad9c6ecba70202a3775f2fd9b476cfc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:15 -0700 Subject: KVM: x86/mmu: Cleanup sanity check of SPTEs at SP free Massage the error message for the sanity check on SPTEs when freeing a shadow page to be more verbose, and to print out all shadow-present SPTEs, not just the first SPTE encountered. Printing all SPTEs can be quite valuable for debug, e.g. highlights whether the leak is a one-off or widespread, or possibly the result of memory corruption (something else in the kernel stomping on KVM's SPTEs). Opportunistically move the MMU_WARN_ON() into the helper itself, which will allow a future cleanup to use BUILD_BUG_ON_INVALID() as the stub for MMU_WARN_ON(). BUILD_BUG_ON_INVALID() works as intended and results in the compiler complaining about is_empty_shadow_page() not being declared.
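The "complaining about is_empty_shadow_page() not being declared" point is the crux: with an empty do-while stub, the asserted expression is discarded entirely, so a reference to a function that only exists under MMU_DEBUG compiles cleanly, whereas BUILD_BUG_ON_INVALID() still type-checks the expression. A sketch of the failure mode, assuming the pre-cleanup macros:

	#ifdef MMU_DEBUG
	#define MMU_WARN_ON(x) WARN_ON(x)
	#else
	#define MMU_WARN_ON(x) do { } while (0)	/* 'x' vanishes: no declaration needed */
	#endif

	/*
	 * Compiles with MMU_DEBUG undefined even though is_empty_shadow_page()
	 * is only defined under #ifdef MMU_DEBUG; moving the assert into the
	 * helper removes the cross-#ifdef reference entirely.
	 */
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));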
Link: https://lore.kernel.org/r/20230729004722.1056172-6-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3749ccdef6ce..cb3adaa69350 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1689,21 +1689,19 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return young; } -#ifdef MMU_DEBUG -static int is_empty_shadow_page(u64 *spt) +static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp) { +#ifdef MMU_DEBUG int i; for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { - if (is_shadow_present_pte(spt[i])) { - printk(KERN_ERR "%s: %p %llx\n", __func__, - &spt[i], spt[i]); - return 0; - } + if (MMU_WARN_ON(is_shadow_present_pte(sp->spt[i]))) + pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free", + sp->spt[i], &sp->spt[i], + kvm_mmu_page_get_gfn(sp, i)); } - return 1; -} #endif +} /* * This value is the sum of all of the kvm instances's @@ -1731,7 +1729,8 @@ static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp) { - MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); + kvm_mmu_check_sptes_at_free(sp); + hlist_del(&sp->hash_link); list_del(&sp->link); free_page((unsigned long)sp->spt); -- cgit From 0fe6370eb3d5603e2e41bfca00043ed8b8f90cfb Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:16 -0700 Subject: KVM: x86/mmu: Rename MMU_WARN_ON() to KVM_MMU_WARN_ON() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename MMU_WARN_ON() to make it super obvious that the assertions are all about KVM's MMU, not the primary MMU. Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230729004722.1056172-7-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 ++-- arch/x86/kvm/mmu/mmu_internal.h | 4 ++-- arch/x86/kvm/mmu/spte.h | 8 ++++---- arch/x86/kvm/mmu/tdp_mmu.c | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index cb3adaa69350..8dc40d023742 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1251,7 +1251,7 @@ static bool spte_clear_dirty(u64 *sptep) { u64 spte = *sptep; - MMU_WARN_ON(!spte_ad_enabled(spte)); + KVM_MMU_WARN_ON(!spte_ad_enabled(spte)); spte &= ~shadow_dirty_mask; return mmu_spte_update(sptep, spte); } @@ -1695,7 +1695,7 @@ static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp) int i; for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { - if (MMU_WARN_ON(is_shadow_present_pte(sp->spt[i]))) + if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i]))) pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free", sp->spt[i], &sp->spt[i], kvm_mmu_page_get_gfn(sp, i)); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 9599e7eb6d85..d5ef61635b94 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -9,9 +9,9 @@ #undef MMU_DEBUG #ifdef MMU_DEBUG -#define MMU_WARN_ON(x) WARN_ON(x) +#define KVM_MMU_WARN_ON(x) WARN_ON(x) #else -#define MMU_WARN_ON(x) do { } while (0) +#define KVM_MMU_WARN_ON(x) do { } while (0) #endif /* Page table builder macros common to shadow (host) PTEs and guest PTEs. 
*/ diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 1279db2eab44..83e6614f3720 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -265,13 +265,13 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) static inline bool spte_ad_enabled(u64 spte) { - MMU_WARN_ON(!is_shadow_present_pte(spte)); + KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED; } static inline bool spte_ad_need_write_protect(u64 spte) { - MMU_WARN_ON(!is_shadow_present_pte(spte)); + KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); /* * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED is '0', * and non-TDP SPTEs will never set these bits. Optimize for 64-bit @@ -282,13 +282,13 @@ static inline bool spte_ad_need_write_protect(u64 spte) static inline u64 spte_shadow_accessed_mask(u64 spte) { - MMU_WARN_ON(!is_shadow_present_pte(spte)); + KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; } static inline u64 spte_shadow_dirty_mask(u64 spte) { - MMU_WARN_ON(!is_shadow_present_pte(spte)); + KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 639c5f2c88f8..e54d5f442426 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1548,8 +1548,8 @@ retry: if (!is_shadow_present_pte(iter.old_spte)) continue; - MMU_WARN_ON(kvm_ad_enabled() && - spte_ad_need_write_protect(iter.old_spte)); + KVM_MMU_WARN_ON(kvm_ad_enabled() && + spte_ad_need_write_protect(iter.old_spte)); if (!(iter.old_spte & dbit)) continue; @@ -1609,8 +1609,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, if (!mask) break; - MMU_WARN_ON(kvm_ad_enabled() && - spte_ad_need_write_protect(iter.old_spte)); + KVM_MMU_WARN_ON(kvm_ad_enabled() && + spte_ad_need_write_protect(iter.old_spte)); if (iter.level > PG_LEVEL_4K || !(mask & (1UL << (iter.gfn - gfn)))) -- cgit From 20ba462dfda6a95cb3bfb5577da813acf3dc4b40 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:17 -0700 Subject: KVM: x86/mmu: Convert "runtime" WARN_ON() assertions to WARN_ON_ONCE() Convert all "runtime" assertions, i.e. assertions that can be triggered while running vCPUs, from WARN_ON() to WARN_ON_ONCE(). Every WARN in the MMU that is tied to running vCPUs, i.e. not contained to loading and initializing KVM, is likely to fire _a lot_ when it does trigger. E.g. if KVM ends up with a bug that causes a root to be invalidated before the page fault handler is invoked, pretty much _every_ page fault VM-Exit triggers the WARN. If a WARN is triggered frequently, the resulting spam usually causes a lot of damage of its own, e.g. consumes resources to log the WARN and pollutes the kernel log, often to the point where other useful information can be lost. In many cases, the damage caused by the spam is actually worse than the bug itself, e.g. KVM can almost always recover from an unexpectedly invalid root. On the flip side, warning every time is rarely helpful for debug and triage, i.e. a single splat is usually sufficient to point a debugger in the right direction, and automated testing, e.g. syzkaller, typically runs with panic_on_warn=1, i.e. will never get past the first WARN anyways. Lastly, when an assertion fails multiple times, the stack traces in KVM are almost always identical, i.e. the full splat only needs to be captured once.
And _if_ there is value in capturing information about the failed assert, a ratelimited printk() is sufficient and less likely to rack up a large amount of collateral damage. Link: https://lore.kernel.org/r/20230729004722.1056172-8-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 48 ++++++++++++++++++++--------------------- arch/x86/kvm/mmu/mmu_internal.h | 2 +- arch/x86/kvm/mmu/page_track.c | 16 +++++++------- arch/x86/kvm/mmu/paging_tmpl.h | 4 ++-- arch/x86/kvm/mmu/spte.c | 4 ++-- arch/x86/kvm/mmu/tdp_iter.c | 4 ++-- arch/x86/kvm/mmu/tdp_mmu.c | 20 ++++++++--------- 7 files changed, 49 insertions(+), 49 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 8dc40d023742..ab3325981f9a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -481,7 +481,7 @@ retry: */ static void mmu_spte_set(u64 *sptep, u64 new_spte) { - WARN_ON(is_shadow_present_pte(*sptep)); + WARN_ON_ONCE(is_shadow_present_pte(*sptep)); __set_spte(sptep, new_spte); } @@ -493,7 +493,7 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) { u64 old_spte = *sptep; - WARN_ON(!is_shadow_present_pte(new_spte)); + WARN_ON_ONCE(!is_shadow_present_pte(new_spte)); check_spte_writable_invariants(new_spte); if (!is_shadow_present_pte(old_spte)) { @@ -506,7 +506,7 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) else old_spte = __update_clear_spte_slow(sptep, new_spte); - WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte)); + WARN_ON_ONCE(spte_to_pfn(old_spte) != spte_to_pfn(new_spte)); return old_spte; } @@ -588,7 +588,7 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) * by a refcounted page, the refcount is elevated. */ page = kvm_pfn_to_refcounted_page(pfn); - WARN_ON(page && !page_count(page)); + WARN_ON_ONCE(page && !page_count(page)); if (is_accessed_spte(old_spte)) kvm_set_pfn_accessed(pfn); @@ -803,7 +803,7 @@ static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot, for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { linfo = lpage_info_slot(gfn, slot, i); linfo->disallow_lpage += count; - WARN_ON(linfo->disallow_lpage < 0); + WARN_ON_ONCE(linfo->disallow_lpage < 0); } } @@ -1198,7 +1198,7 @@ static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush) struct kvm_mmu_page *sp; sp = sptep_to_sp(sptep); - WARN_ON(sp->role.level == PG_LEVEL_4K); + WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K); drop_spte(kvm, sptep); @@ -1457,7 +1457,7 @@ static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, u64 new_spte; kvm_pfn_t new_pfn; - WARN_ON(pte_huge(pte)); + WARN_ON_ONCE(pte_huge(pte)); new_pfn = pte_pfn(pte); restart: @@ -1818,7 +1818,7 @@ static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) { --sp->unsync_children; - WARN_ON((int)sp->unsync_children < 0); + WARN_ON_ONCE((int)sp->unsync_children < 0); __clear_bit(idx, sp->unsync_child_bitmap); } @@ -1876,7 +1876,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp, static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) { - WARN_ON(!sp->unsync); + WARN_ON_ONCE(!sp->unsync); trace_kvm_mmu_sync_page(sp); sp->unsync = 0; --kvm->stat.mmu_unsync; @@ -2051,11 +2051,11 @@ static int mmu_pages_first(struct kvm_mmu_pages *pvec, if (pvec->nr == 0) return 0; - WARN_ON(pvec->page[0].idx != INVALID_INDEX); + WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX); sp =
pvec->page[0].sp; level = sp->role.level; - WARN_ON(level == PG_LEVEL_4K); + WARN_ON_ONCE(level == PG_LEVEL_4K); parents->parent[level-2] = sp; @@ -2077,7 +2077,7 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents) if (!sp) return; - WARN_ON(idx == INVALID_INDEX); + WARN_ON_ONCE(idx == INVALID_INDEX); clear_unsync_child_bit(sp, idx); level++; } while (!sp->unsync_children); @@ -2198,7 +2198,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm, if (ret < 0) break; - WARN_ON(!list_empty(&invalid_list)); + WARN_ON_ONCE(!list_empty(&invalid_list)); if (ret > 0) kvm_flush_remote_tlbs(kvm); } @@ -2653,7 +2653,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, kvm_flush_remote_tlbs(kvm); list_for_each_entry_safe(sp, nsp, invalid_list, link) { - WARN_ON(!sp->role.invalid || sp->root_count); + WARN_ON_ONCE(!sp->role.invalid || sp->root_count); kvm_mmu_free_shadow_page(sp); } } @@ -2848,7 +2848,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, continue; } - WARN_ON(sp->role.level != PG_LEVEL_4K); + WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K); kvm_unsync_page(kvm, sp); } if (locked) @@ -3001,7 +3001,7 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *spte, *start = NULL; int i; - WARN_ON(!sp->role.direct); + WARN_ON_ONCE(!sp->role.direct); i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1); spte = sp->spt + i; @@ -3547,7 +3547,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, * SPTE to ensure any non-PA bits are dropped. */ sp = spte_to_child_sp(*root_hpa); - if (WARN_ON(!sp)) + if (WARN_ON_ONCE(!sp)) return; if (is_tdp_mmu_page(sp)) @@ -4162,7 +4162,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) return RET_PF_EMULATE; reserved = get_mmio_spte(vcpu, addr, &spte); - if (WARN_ON(reserved)) + if (WARN_ON_ONCE(reserved)) return -EINVAL; if (is_mmio_spte(spte)) { @@ -5483,9 +5483,9 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root.hpa)); + WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa)); kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa)); + WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa)); vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); } @@ -5700,7 +5700,7 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err if (WARN_ON_ONCE(error_code & PFERR_IMPLICIT_ACCESS)) error_code &= ~PFERR_IMPLICIT_ACCESS; - if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) + if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) return RET_PF_RETRY; r = RET_PF_INVALID; @@ -6057,7 +6057,7 @@ restart: * pages. Skip the bogus page, otherwise we'll get stuck in an * infinite loop if the page gets put back on the list (again). 
*/ - if (WARN_ON(sp->role.invalid)) + if (WARN_ON_ONCE(sp->role.invalid)) continue; /* @@ -6685,7 +6685,7 @@ void kvm_mmu_zap_all(struct kvm *kvm) write_lock(&kvm->mmu_lock); restart: list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { - if (WARN_ON(sp->role.invalid)) + if (WARN_ON_ONCE(sp->role.invalid)) continue; if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) goto restart; @@ -6703,7 +6703,7 @@ restart: void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) { - WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); + WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); gen &= MMIO_SPTE_GEN_MASK; diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index d5ef61635b94..6b6cd30bcf4b 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -9,7 +9,7 @@ #undef MMU_DEBUG #ifdef MMU_DEBUG -#define KVM_MMU_WARN_ON(x) WARN_ON(x) +#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x) #else #define KVM_MMU_WARN_ON(x) do { } while (0) #endif diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 0a2ac438d647..fd16918b3a7a 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -94,7 +94,7 @@ static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, val = slot->arch.gfn_track[mode][index]; - if (WARN_ON(val + count < 0 || val + count > USHRT_MAX)) + if (WARN_ON_ONCE(val + count < 0 || val + count > USHRT_MAX)) return; slot->arch.gfn_track[mode][index] += count; @@ -117,11 +117,11 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, enum kvm_page_track_mode mode) { - if (WARN_ON(!page_track_mode_is_valid(mode))) + if (WARN_ON_ONCE(!page_track_mode_is_valid(mode))) return; - if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE && - !kvm_page_track_write_tracking_enabled(kvm))) + if (WARN_ON_ONCE(mode == KVM_PAGE_TRACK_WRITE && + !kvm_page_track_write_tracking_enabled(kvm))) return; update_gfn_track(slot, gfn, mode, 1); @@ -155,11 +155,11 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode) { - if (WARN_ON(!page_track_mode_is_valid(mode))) + if (WARN_ON_ONCE(!page_track_mode_is_valid(mode))) return; - if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE && - !kvm_page_track_write_tracking_enabled(kvm))) + if (WARN_ON_ONCE(mode == KVM_PAGE_TRACK_WRITE && + !kvm_page_track_write_tracking_enabled(kvm))) return; update_gfn_track(slot, gfn, mode, -1); @@ -181,7 +181,7 @@ bool kvm_slot_page_track_is_active(struct kvm *kvm, { int index; - if (WARN_ON(!page_track_mode_is_valid(mode))) + if (WARN_ON_ONCE(!page_track_mode_is_valid(mode))) return false; if (!slot) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 7a97f769a7cb..a3fc7c1a7f8d 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -633,7 +633,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, if (FNAME(gpte_changed)(vcpu, gw, top_level)) goto out_gpte_changed; - if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) + if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) goto out_gpte_changed; for_each_shadow_entry(vcpu, fault->addr, it) { @@ -830,7 +830,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) { int offset = 0; - WARN_ON(sp->role.level != PG_LEVEL_4K); + WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K); if (PTTYPE == 32) offset = sp->role.quadrant << SPTE_LEVEL_BITS; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 
438a86bda9f3..4a599130e9c9 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -61,7 +61,7 @@ static u64 generation_mmio_spte_mask(u64 gen) { u64 mask; - WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); + WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK); mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK; mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK; @@ -240,7 +240,7 @@ out: if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) { /* Enforced by kvm_mmu_hugepage_adjust. */ - WARN_ON(level > PG_LEVEL_4K); + WARN_ON_ONCE(level > PG_LEVEL_4K); mark_page_dirty_in_slot(vcpu->kvm, slot, gfn); } diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index d2eb0d4f8710..5bb09f8d9fc6 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -41,8 +41,8 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root, { int root_level = root->role.level; - WARN_ON(root_level < 1); - WARN_ON(root_level > PT64_ROOT_MAX_LEVEL); + WARN_ON_ONCE(root_level < 1); + WARN_ON_ONCE(root_level > PT64_ROOT_MAX_LEVEL); iter->next_last_level_gfn = next_last_level_gfn; iter->root_level = root_level; diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index e54d5f442426..fda32a72b406 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -475,9 +475,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, bool is_leaf = is_present && is_last_spte(new_spte, level); bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte); - WARN_ON(level > PT64_ROOT_MAX_LEVEL); - WARN_ON(level < PG_LEVEL_4K); - WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1)); + WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL); + WARN_ON_ONCE(level < PG_LEVEL_4K); + WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1)); /* * If this warning were to trigger it would indicate that there was a @@ -522,9 +522,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, * impact the guest since both the former and current SPTEs * are nonpresent. */ - if (WARN_ON(!is_mmio_spte(old_spte) && - !is_mmio_spte(new_spte) && - !is_removed_spte(new_spte))) + if (WARN_ON_ONCE(!is_mmio_spte(old_spte) && + !is_mmio_spte(new_spte) && + !is_removed_spte(new_spte))) pr_err("Unexpected SPTE change! Nonpresent SPTEs\n" "should not be replaced with another,\n" "different nonpresent SPTE, unless one or both\n" @@ -661,7 +661,7 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, * should be used. If operating under the MMU lock in write mode, the * use of the removed SPTE should not be necessary. */ - WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte)); + WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte)); old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level); @@ -709,7 +709,7 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter, bool flush, bool shared) { - WARN_ON(iter->yielded); + WARN_ON_ONCE(iter->yielded); /* Ensure forward progress has been made before yielding. 
*/ if (iter->next_last_level_gfn == iter->yielded_gfn) @@ -728,7 +728,7 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, rcu_read_lock(); - WARN_ON(iter->gfn > iter->next_last_level_gfn); + WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn); iter->yielded = true; } @@ -1241,7 +1241,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte; /* Huge pages aren't expected to be modified without first being zapped. */ - WARN_ON(pte_huge(range->arg.pte) || range->start + 1 != range->end); + WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end); if (iter->level != PG_LEVEL_4K || !is_shadow_present_pte(iter->old_spte)) -- cgit From 72e2fb24a0b0528bcc16a8f6ad2cf336ac361525 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:18 -0700 Subject: KVM: x86/mmu: Bug the VM if a vCPU ends up in long mode without PAE enabled Promote the ASSERT(), which is quite dead code in KVM, into a KVM_BUG_ON() for KVM's sanity check that CR4.PAE=1 if the vCPU is in long mode when performing a walk of guest page tables. The sanity check is quite cheap since neither EFER nor CR4.PAE requires a VMREAD, especially relative to the cost of walking the guest page tables. More importantly, the sanity check would have prevented the true badness fixed by commit 112e66017bff ("KVM: nVMX: add missing consistency checks for CR0 and CR4"). The missed consistency check resulted in some versions of KVM corrupting the on-stack guest_walker structure due to KVM thinking there are 4/5 levels of page tables, but wiring up the MMU hooks to point at the paging32 implementation, which only allocates space for two levels of page tables in "struct guest_walker32". Queue a page fault for injection if the assertion fails, as both callers, FNAME(gva_to_gpa) and FNAME(walk_addr_generic), assume that walker.fault contains sane info on a walk failure. E.g. not populating the fault info could result in KVM consuming and/or exposing uninitialized stack data before the vCPU is kicked out to userspace, which doesn't happen until KVM checks for KVM_REQ_VM_DEAD on the next enter. Move the check below the initialization of "pte_access" so that the aforementioned to-be-injected page fault doesn't consume uninitialized stack data. The information _shouldn't_ reach the guest or userspace, but there's zero downside to being paranoid in this case. Link: https://lore.kernel.org/r/20230729004722.1056172-9-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/paging_tmpl.h | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index a3fc7c1a7f8d..f8d358226ac6 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -338,7 +338,6 @@ retry_walk: } #endif walker->max_level = walker->level; - ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); /* * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging @@ -348,6 +347,17 @@ retry_walk: nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; pte_access = ~0; + + /* + * Queue a page fault for injection if this assertion fails, as callers + * assume that walker.fault contains sane info on a walk failure. I.e. + * avoid making the situation worse by inducing even worse badness + * between when the assertion fails and when KVM kicks the vCPU out to + * userspace (because the VM is bugged).
+ */ + if (KVM_BUG_ON(is_long_mode(vcpu) && !is_pae(vcpu), vcpu->kvm)) + goto error; + ++walker->level; do { -- cgit From 870d4d4ed82779e717ec7fb0f9b64f43bd7a736b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:19 -0700 Subject: KVM: x86/mmu: Replace MMU_DEBUG with proper KVM_PROVE_MMU Kconfig MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace MMU_DEBUG, which requires manually modifying KVM to enable the macro, with a proper Kconfig, KVM_PROVE_MMU. Now that pgprintk() and rmap_printk() are gone, i.e. the macro guards only KVM_MMU_WARN_ON() and won't flood the kernel logs, enabling the option for debug kernels is both desirable and feasible. Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230729004722.1056172-10-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 13 +++++++++++++ arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/mmu_internal.h | 4 +--- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 66dbd1f4d57d..ed90f148140d 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -138,6 +138,19 @@ config KVM_XEN If in doubt, say "N". +config KVM_PROVE_MMU + bool "Prove KVM MMU correctness" + depends on DEBUG_KERNEL + depends on KVM + depends on EXPERT + help + Enables runtime assertions in KVM's MMU that are too costly to enable + in anything remotely resembling a production environment, e.g. this + gates code that verifies a to-be-freed page table doesn't have any + present SPTEs. + + If in doubt, say "N". + config KVM_EXTERNAL_WRITE_TRACKING bool diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ab3325981f9a..baa312388721 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1691,7 +1691,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp) { -#ifdef MMU_DEBUG +#ifdef CONFIG_KVM_PROVE_MMU int i; for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 6b6cd30bcf4b..606f343d99c1 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -6,9 +6,7 @@ #include #include -#undef MMU_DEBUG - -#ifdef MMU_DEBUG +#ifdef CONFIG_KVM_PROVE_MMU #define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x) #else #define KVM_MMU_WARN_ON(x) do { } while (0) #endif -- cgit From 3328dfe0eac38df388a0cded8e3f3cd307ca0be3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:20 -0700 Subject: KVM: x86/mmu: Use BUILD_BUG_ON_INVALID() for KVM_MMU_WARN_ON() stub MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use BUILD_BUG_ON_INVALID() instead of an empty do-while loop to stub out KVM_MMU_WARN_ON() when CONFIG_KVM_PROVE_MMU=n, that way _some_ build issues with the usage of KVM_MMU_WARN_ON() will be detected even if the kernel is using the stubs, e.g. basic syntax errors will be detected.
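For context, the stub works by wrapping the expression in sizeof() so nothing is evaluated at runtime, but the expression must still parse and type-check. Paraphrased from include/linux/build_bug.h; the exact definition may differ slightly by kernel version:

	/* From include/linux/build_bug.h (approximate): */
	#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))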
Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230729004722.1056172-11-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu_internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 606f343d99c1..95846c40e79e 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -9,7 +9,7 @@ #ifdef CONFIG_KVM_PROVE_MMU #define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x) #else -#define KVM_MMU_WARN_ON(x) do { } while (0) +#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x) #endif /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */ -- cgit From 069f30c619792d5202d72fecd842cacbee260561 Mon Sep 17 00:00:00 2001 From: Mingwei Zhang Date: Fri, 28 Jul 2023 17:47:21 -0700 Subject: KVM: x86/mmu: Plumb "struct kvm" all the way to pte_list_remove() Plumb "struct kvm" all the way to pte_list_remove() to allow the usage of KVM_BUG() and/or KVM_BUG_ON(). This will allow killing only the offending VM instead of doing BUG() if the kernel is built with CONFIG_BUG_ON_DATA_CORRUPTION=n, i.e. does NOT want to BUG() if KVM's data structures (rmaps) appear to be corrupted. Signed-off-by: Mingwei Zhang [sean: tweak changelog] Link: https://lore.kernel.org/r/20230729004722.1056172-12-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index baa312388721..f518dd569a14 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -961,7 +961,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, return count; } -static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, +static void pte_list_desc_remove_entry(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc, int i) { struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); @@ -997,7 +998,8 @@ static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, mmu_free_pte_list_desc(head_desc); } -static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) +static void pte_list_remove(struct kvm *kvm, u64 *spte, + struct kvm_rmap_head *rmap_head) { struct pte_list_desc *desc; int i; @@ -1016,7 +1018,8 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) while (desc) { for (i = 0; i < desc->spte_count; ++i) { if (desc->sptes[i] == spte) { - pte_list_desc_remove_entry(rmap_head, desc, i); + pte_list_desc_remove_entry(kvm, rmap_head, + desc, i); return; } } @@ -1031,7 +1034,7 @@ static void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, u64 *sptep) { mmu_spte_clear_track_bits(kvm, sptep); - pte_list_remove(sptep, rmap_head); + pte_list_remove(kvm, sptep, rmap_head); } /* Return true if at least one SPTE was zapped, false otherwise */ @@ -1106,7 +1109,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) slot = __gfn_to_memslot(slots, gfn); rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); - pte_list_remove(spte, rmap_head); + pte_list_remove(kvm, spte, rmap_head); } /* @@ -1753,16 +1756,16 @@ static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache, pte_list_add(cache, parent_pte, &sp->parent_ptes); } -static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, +static void 
mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte) { - pte_list_remove(parent_pte, &sp->parent_ptes); + pte_list_remove(kvm, parent_pte, &sp->parent_ptes); } -static void drop_parent_pte(struct kvm_mmu_page *sp, +static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte) { - mmu_page_remove_parent_pte(sp, parent_pte); + mmu_page_remove_parent_pte(kvm, sp, parent_pte); mmu_spte_clear_no_track(parent_pte); } @@ -2477,7 +2480,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (child->role.access == direct_access) return; - drop_parent_pte(child, sptep); + drop_parent_pte(vcpu->kvm, child, sptep); kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep); } } @@ -2495,7 +2498,7 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, drop_spte(kvm, spte); } else { child = spte_to_child_sp(pte); - drop_parent_pte(child, spte); + drop_parent_pte(kvm, child, spte); /* * Recursively zap nested TDP SPs, parentless SPs are @@ -2526,13 +2529,13 @@ static int kvm_mmu_page_unlink_children(struct kvm *kvm, return zapped; } -static void kvm_mmu_unlink_parents(struct kvm_mmu_page *sp) +static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) { u64 *sptep; struct rmap_iterator iter; while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) - drop_parent_pte(sp, sptep); + drop_parent_pte(kvm, sp, sptep); } static int mmu_zap_unsync_children(struct kvm *kvm, @@ -2571,7 +2574,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, ++kvm->stat.mmu_shadow_zapped; *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list); - kvm_mmu_unlink_parents(sp); + kvm_mmu_unlink_parents(kvm, sp); /* Zapping children means active_mmu_pages has become unstable. */ list_unstable = *nr_zapped; @@ -2929,7 +2932,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 pte = *sptep; child = spte_to_child_sp(pte); - drop_parent_pte(child, sptep); + drop_parent_pte(vcpu->kvm, child, sptep); flush = true; } else if (pfn != spte_to_pfn(*sptep)) { drop_spte(vcpu->kvm, sptep); -- cgit From 52e322eda3d475614210efbc0f2793a1da9d367a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:47:22 -0700 Subject: KVM: x86/mmu: BUG() in rmap helpers iff CONFIG_BUG_ON_DATA_CORRUPTION=y Introduce KVM_BUG_ON_DATA_CORRUPTION() and use it in the low-level rmap helpers to convert the existing BUG()s to WARN_ON_ONCE() when the kernel is built with CONFIG_BUG_ON_DATA_CORRUPTION=n, i.e. does NOT want to BUG() on corruption of host kernel data structures. Environments that don't have infrastructure to automatically capture crash dumps, i.e. aren't likely to enable CONFIG_BUG_ON_DATA_CORRUPTION=y, are typically better served overall by WARN-and-continue behavior (for the kernel, the VM is dead regardless), as a BUG() while holding mmu_lock all but guarantees the _best_ case scenario is a panic(). Make the BUG()s conditional instead of removing/replacing them entirely as there's a non-zero chance (though by no means a guarantee) that the damage isn't contained to the target VM, e.g. if no rmap is found for a SPTE then KVM may be double-zapping the SPTE, i.e. has already freed the memory the SPTE pointed at and thus KVM is reading/writing memory that KVM no longer owns. 
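The calling convention this enables in the rmap helpers is sketched below (the macro itself appears in the diff that follows): the macro returns the unlikely condition, so callers can bail out instead of dereferencing a corrupted list.

	if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
		return;	/* VM marked bugged (or the host BUG()ed); don't touch the rmap */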
Link: https://lore.kernel.org/all/20221129191237.31447-1-mizhang@google.com Suggested-by: Mingwei Zhang Cc: David Matlack Cc: Jim Mattson Reviewed-by: Mingwei Zhang Link: https://lore.kernel.org/r/20230729004722.1056172-13-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 21 ++++++++++----------- include/linux/kvm_host.h | 19 +++++++++++++++++++ 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index f518dd569a14..0420944da242 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -973,7 +973,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm, * when adding an entry and the previous head is full, and heads are * removed (this flow) when they become empty. */ - BUG_ON(j < 0); + KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm); /* * Replace the to-be-freed SPTE with the last valid entry from the head @@ -1004,14 +1004,13 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte, struct pte_list_desc *desc; int i; - if (!rmap_head->val) { - pr_err("%s: %p 0->BUG\n", __func__, spte); - BUG(); - } else if (!(rmap_head->val & 1)) { - if ((u64 *)rmap_head->val != spte) { - pr_err("%s: %p 1->BUG\n", __func__, spte); - BUG(); - } + if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm)) + return; + + if (!(rmap_head->val & 1)) { + if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm)) + return; + rmap_head->val = 0; } else { desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); @@ -1025,8 +1024,8 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte, } desc = desc->more; } - pr_err("%s: %p many->many\n", __func__, spte); - BUG(); + + KVM_BUG_ON_DATA_CORRUPTION(true, kvm); } } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1b583f35547e..fb6c6109fdca 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -867,6 +867,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm) unlikely(__ret); \ }) +/* + * Note, "data corruption" refers to corruption of host kernel data structures, + * not guest data. Guest data corruption, suspected or confirmed, that is tied + * and contained to a single VM should *never* BUG() and potentially panic the + * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure + * is corrupted and that corruption can have a cascading effect to other parts + * of the hosts and/or to other VMs. + */ +#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \ +({ \ + bool __ret = !!(cond); \ + \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \ + BUG_ON(__ret); \ + else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PROVE_RCU -- cgit From f046923af79158361295ed4f0a588c80b9fdcc1d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:07 -0700 Subject: drm/i915/gvt: Verify pfn is "valid" before dereferencing "struct page" Check that the pfn found by gfn_to_pfn() is actually backed by "struct page" memory prior to retrieving and dereferencing the page. KVM supports backing guest memory with VM_PFNMAP, VM_IO, etc., and so there is no guarantee the pfn returned by gfn_to_pfn() has an associated "struct page". 
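The resulting guard pattern, in sketch form (mirroring the diff below): both error pfns and pfns without struct page backing must be filtered before pfn_to_page() is safe.

	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn))	/* no memslot, or the translation failed */
		return -EINVAL;

	if (!pfn_valid(pfn))		/* pfn not backed by struct page, e.g. VM_PFNMAP/VM_IO */
		return -EINVAL;

	page = pfn_to_page(pfn);	/* now safe to dereference */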
Fixes: b901b252b6cf ("drm/i915/gvt: Add 2M huge gtt support") Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-2-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gtt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 4ec85308379a..58b9b316ae46 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1183,6 +1183,10 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry)); if (is_error_noslot_pfn(pfn)) return -EINVAL; + + if (!pfn_valid(pfn)) + return -EINVAL; + return PageTransHuge(pfn_to_page(pfn)); } -- cgit From 1e557c1cd0549b30fa4bc3e9aa46c5eeadaa63f5 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Fri, 28 Jul 2023 18:35:08 -0700 Subject: drm/i915/gvt: remove interface intel_gvt_is_valid_gfn Currently intel_gvt_is_valid_gfn() is called in two places: (1) shadowing guest GGTT entry (2) shadowing guest PPGTT leaf entry, which was introduced in commit cc753fbe1ac4 ("drm/i915/gvt: validate gfn before set shadow page entry"). However, now it's not necessary to call this interface any more, because a. GGTT partial write issue has been fixed by commit bc0686ff5fad ("drm/i915/gvt: support inconsecutive partial gtt entry write") commit 510fe10b6180 ("drm/i915/gvt: fix a bug of partially write ggtt enties") b. PPGTT resides in normal guest RAM and we only treat 8-byte writes as valid page table writes. Any invalid GPA found is regarded as an error, either due to guest misbehavior/attack or a bug in host shadow code. So, rather than do GFN pre-checking, replace invalid GFNs with the scratch GFN, and continue silently, just remove the pre-checking and abort PPGTT shadowing when an error is detected. c. GFN validity check is still performed in intel_gvt_dma_map_guest_page() --> gvt_pin_guest_page(). It's more desirable to call the VFIO interface to do both the validity check and the mapping. Calling intel_gvt_is_valid_gfn() to do a GFN validity check from the KVM side while later mapping the GFN through the VFIO interface is unnecessarily fragile and confusing for unaware readers.
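The VFIO call referred to in point (c) both validates and pins in one step; a rough sketch, with the prototype paraphrased from include/linux/vfio.h (it has changed across kernel versions, so treat the exact signature as an assumption):

	/*
	 * Pin one guest page; fails for GFNs with no valid backing, so a
	 * separate KVM-side validity check buys nothing.
	 */
	ret = vfio_pin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, 1,
			     IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret >= 0 ? -EINVAL : ret;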
Signed-off-by: Yan Zhao [sean: remove now-unused local variables] Acked-by: Zhi Wang Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-3-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gtt.c | 36 +----------------------------------- 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 58b9b316ae46..f30922c55a0c 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -49,22 +49,6 @@ static bool enable_out_of_sync = false; static int preallocated_oos_pages = 8192; -static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) -{ - struct kvm *kvm = vgpu->vfio_device.kvm; - int idx; - bool ret; - - if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) - return false; - - idx = srcu_read_lock(&kvm->srcu); - ret = kvm_is_visible_gfn(kvm, gfn); - srcu_read_unlock(&kvm->srcu, idx); - - return ret; -} - /* * validate a gm address and related range size, * translate it to host gm address @@ -1333,11 +1317,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; struct intel_gvt_gtt_entry se, ge; - unsigned long gfn, i; + unsigned long i; int ret; trace_spt_change(spt->vgpu->id, "born", spt, @@ -1354,13 +1336,6 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) ppgtt_generate_shadow_entry(&se, s, &ge); ppgtt_set_shadow_entry(spt, &se, i); } else { - gfn = ops->get_pfn(&ge); - if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { - ops->set_pfn(&se, gvt->gtt.scratch_mfn); - ppgtt_set_shadow_entry(spt, &se, i); - continue; - } - ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge); if (ret) goto fail; @@ -2335,14 +2310,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, m.val64 = e.val64; m.type = e.type; - /* one PTE update may be issued in multiple writes and the - * first write may not construct a valid gfn - */ - if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { - ops->set_pfn(&m, gvt->gtt.scratch_mfn); - goto out; - } - ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr); if (ret) { @@ -2359,7 +2326,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, ops->clear_present(&m); } -out: ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index); -- cgit From adc7b226b7d675d011fe86447a5a36b932f1b061 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:09 -0700 Subject: drm/i915/gvt: Verify hugepages are contiguous in physical address space When shadowing a GTT entry with a 2M page, verify that the pfns are contiguous, not just that the struct page pointers are contiguous. The memory map is virtually contiguous if "CONFIG_FLATMEM=y || CONFIG_SPARSEMEM_VMEMMAP=y", but not for "CONFIG_SPARSEMEM=y && CONFIG_SPARSEMEM_VMEMMAP=n", so theoretically KVMGT could encounter struct pages that are virtually contiguous, but not physically contiguous. In practice, this flaw is likely a non-issue as it would cause functional problems iff a section isn't 2M aligned _and_ is directly adjacent to another section with discontiguous pfns.
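Condensed, the before/after of the check (the full diff follows): comparing struct page pointers only proves the memory map is virtually contiguous, so the fix compares physical frame numbers instead. Error handling is elided in this sketch.

	/* Before: relies on the memory map itself being contiguous. */
	if (base_page + npage != cur_page)
		goto err;

	/* After: compare pfns, i.e. physical placement, directly. */
	if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page))
		goto err;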
Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Reviewed-by: Yan Zhao Link: https://lore.kernel.org/r/20230729013535.1070024-4-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index de675d799c7d..429f0f993a13 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -161,7 +161,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, if (npage == 0) base_page = cur_page; - else if (base_page + npage != cur_page) { + else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) { gvt_vgpu_err("The pages are not continuous\n"); ret = -EINVAL; npage++; -- cgit From a15e61f3371b35b7db2d7890fdc68e5f7678e49f Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Fri, 28 Jul 2023 18:35:10 -0700 Subject: drm/i915/gvt: Don't try to unpin an empty page range Attempt to unpin pages in the error path of gvt_pin_guest_page() if and only if at least one page was successfully pinned. Unpinning doesn't cause functional problems, but vfio_device_container_unpin_pages() rightfully warns about being asked to unpin zero pages. Signed-off-by: Yan Zhao [sean: write changelog] Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-5-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/kvmgt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 429f0f993a13..0366a699baf5 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -172,7 +172,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, *page = base_page; return 0; err: - gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); + if (npage) + gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); return ret; } -- cgit From 708e49583d7da863898b25dafe4bcd799c414278 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:11 -0700 Subject: drm/i915/gvt: Put the page reference obtained by KVM's gfn_to_pfn() Put the struct page reference acquired by gfn_to_pfn(); KVM's API is that the caller is ultimately responsible for dropping any reference. Note, kvm_release_pfn_clean() ensures the pfn is actually a refcounted struct page before trying to put any references.
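For illustration, the resulting take/check/release pattern in is_2MB_gtt_possible(), condensed from the diff below (all helpers shown are existing KVM/kernel APIs):

    kvm_pfn_t pfn;
    int ret;

    pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
    if (is_error_noslot_pfn(pfn))
            return -EINVAL;         /* error/noslot pfns carry no page reference */

    if (!pfn_valid(pfn))
            return -EINVAL;

    ret = PageTransHuge(pfn_to_page(pfn));
    kvm_release_pfn_clean(pfn);     /* drop the reference gfn_to_pfn() took */
    return ret;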
Fixes: b901b252b6cf ("drm/i915/gvt: Add 2M huge gtt support") Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-6-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gtt.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index f30922c55a0c..5426a27c1b71 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1158,6 +1158,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, { const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; kvm_pfn_t pfn; + int ret; if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) return 0; @@ -1171,7 +1172,9 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, if (!pfn_valid(pfn)) return -EINVAL; - return PageTransHuge(pfn_to_page(pfn)); + ret = PageTransHuge(pfn_to_page(pfn)); + kvm_release_pfn_clean(pfn); + return ret; } static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, -- cgit From 96b138cd23e978cd0975d1f4d5709dae2e061fd1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 3 Aug 2023 15:32:06 -0700 Subject: drm/i915/gvt: Explicitly check that vGPU is attached before shadowing Move the check that a vGPU is attached from is_2MB_gtt_possible() all the way up to shadow_ppgtt_mm() to avoid unnecessary work, and to make it more obvious that a future cleanup of is_2MB_gtt_possible() isn't introducing a bug. is_2MB_gtt_possible() has only one caller, ppgtt_populate_shadow_entry(), and all paths in ppgtt_populate_shadow_entry() eventually check for attachment by way of intel_gvt_dma_map_guest_page(). And of the paths that lead to ppgtt_populate_shadow_entry(), shadow_ppgtt_mm() is the only one that doesn't already check for INTEL_VGPU_STATUS_ACTIVE or INTEL_VGPU_STATUS_ATTACHED. 
workload_thread() <= pick_next_workload() => INTEL_VGPU_STATUS_ACTIVE
|
|-> dispatch_workload()
|   |
|   |-> prepare_workload()
|       |
|       |-> intel_vgpu_sync_oos_pages()
|       |   |
|       |   |-> ppgtt_set_guest_page_sync()
|       |       |
|       |       |-> sync_oos_page()
|       |           |
|       |           |-> ppgtt_populate_shadow_entry()
|       |
|       |-> intel_vgpu_flush_post_shadow()
|           |
|        1: |-> ppgtt_handle_guest_write_page_table()
|               |
|               |-> ppgtt_handle_guest_entry_add()
|                   |
|                2: |-> ppgtt_populate_spt_by_guest_entry()
|                   |   |
|                   |   |-> ppgtt_populate_spt()
|                   |       |
|                   |       |-> ppgtt_populate_shadow_entry()
|                   |           |
|                   |           |-> ppgtt_populate_spt_by_guest_entry() [see 2]
|                   |
|                   |-> ppgtt_populate_shadow_entry()

kvmgt_page_track_write() <= KVM callback => INTEL_VGPU_STATUS_ATTACHED
|
|-> intel_vgpu_page_track_handler()
    |
    |-> ppgtt_write_protection_handler()
        |
        |-> ppgtt_handle_guest_write_page_table_bytes()
            |
            |-> ppgtt_handle_guest_write_page_table() [see 1]

Reviewed-by: Yan Zhao Tested-by: Yan Zhao Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gtt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 5426a27c1b71..de6a484090d7 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1163,8 +1163,6 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) return 0; - if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) - return -EINVAL; pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry)); if (is_error_noslot_pfn(pfn)) return -EINVAL; @@ -1827,6 +1825,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) if (mm->ppgtt_mm.shadowed) return 0; + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) + return -EINVAL; + mm->ppgtt_mm.shadowed = true; for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { -- cgit From 241f0aadb8577dd17fa521bf19ba7ae85dc33d41 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:13 -0700 Subject: drm/i915/gvt: Error out on an attempt to shadow an unknown GTT entry type Bail from ppgtt_populate_shadow_entry() if an unexpected GTT entry type is encountered instead of subtly falling through to the common "direct shadow" path. Eliminating the default/error path's reliance on the common handling will allow hoisting intel_gvt_dma_map_guest_page() into the case statements so that the 2MiB case can try intel_gvt_dma_map_guest_page() and fall back to splitting the entry on failure.
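For illustration, the hardened switch shape in ppgtt_populate_shadow_entry(), mirroring the one-line fix in the diff below (cases abbreviated):

    switch (ge->type) {
    case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
            /* ... shadow the entry ... */
            break;
    /* ... 64K, 2M and 1G cases elided ... */
    default:
            GEM_BUG_ON(1);
            return -EINVAL; /* never fall through to "direct shadow" */
    }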
Reviewed-by: Zhi Wang Tested-by: Yongwei Ma Reviewed-by: Yan Zhao Link: https://lore.kernel.org/r/20230729013535.1070024-8-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gtt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index de6a484090d7..eab844dffb8c 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1303,6 +1303,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, return -EINVAL; default: GEM_BUG_ON(1); + return -EINVAL; } /* direct shadow */ -- cgit From ba193f62c075768b475efbaec32481a20869813f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:14 -0700 Subject: drm/i915/gvt: Don't rely on KVM's gfn_to_pfn() to query possible 2M GTT Now that gvt_pin_guest_page() explicitly verifies the pinned PFN is backed by a transparent hugepage, don't use KVM's gfn_to_pfn() to pre-check if a 2MiB GTT entry is possible and instead just try to map the GFN with a 2MiB entry. Using KVM to query a pfn that is ultimately managed through VFIO is odd, and KVM's gfn_to_pfn() is not intended for non-KVM consumption; it's exported only because of KVM vendor modules (x86 and PPC). Open code the check on 2MiB support instead of keeping is_2MB_gtt_possible() around for a single line of code. Move the call to intel_gvt_dma_map_guest_page() for a 4KiB entry into its case statement, i.e. fork the common path into the 4KiB and 2MiB "direct" shadow paths. Keeping the call in the "common" path is arguably more in the spirit of "one change per patch", but retaining the local "page_size" variable is silly, i.e. the call site will be changed either way, and jumping around the no-longer-common code is more subtle and rather odd, i.e. would just need to be immediately cleaned up. Drop the error message from gvt_pin_guest_page() when KVMGT attempts to shadow a 2MiB guest page that isn't backed by a compatible hugepage in the host. Dropping the pre-check on a THP makes it much more likely that the "error" will be encountered in normal operation. Reviewed-by: Yan Zhao Tested-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-9-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gtt.c | 49 +++++++--------------------------------- drivers/gpu/drm/i915/gvt/kvmgt.c | 1 - 2 files changed, 8 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index eab844dffb8c..dc5ad590ad03 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1145,36 +1145,6 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, ops->set_pfn(se, s->shadow_page.mfn); } -/* - * Check if can do 2M page - * @vgpu: target vgpu - * @entry: target pfn's gtt entry - * - * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition, - * negative if found err.
- */ -static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, - struct intel_gvt_gtt_entry *entry) -{ - const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - kvm_pfn_t pfn; - int ret; - - if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) - return 0; - - pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry)); - if (is_error_noslot_pfn(pfn)) - return -EINVAL; - - if (!pfn_valid(pfn)) - return -EINVAL; - - ret = PageTransHuge(pfn_to_page(pfn)); - kvm_release_pfn_clean(pfn); - return ret; -} - static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *se) @@ -1268,7 +1238,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, { const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se = *ge; - unsigned long gfn, page_size = PAGE_SIZE; + unsigned long gfn; dma_addr_t dma_addr; int ret; @@ -1280,6 +1250,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, switch (ge->type) { case GTT_TYPE_PPGTT_PTE_4K_ENTRY: gvt_vdbg_mm("shadow 4K gtt entry\n"); + ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr); + if (ret) + return -ENXIO; break; case GTT_TYPE_PPGTT_PTE_64K_ENTRY: gvt_vdbg_mm("shadow 64K gtt entry\n"); @@ -1291,12 +1264,10 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, return split_64KB_gtt_entry(vgpu, spt, index, &se); case GTT_TYPE_PPGTT_PTE_2M_ENTRY: gvt_vdbg_mm("shadow 2M gtt entry\n"); - ret = is_2MB_gtt_possible(vgpu, ge); - if (ret == 0) + if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) || + intel_gvt_dma_map_guest_page(vgpu, gfn, + I915_GTT_PAGE_SIZE_2M, &dma_addr)) return split_2MB_gtt_entry(vgpu, spt, index, &se); - else if (ret < 0) - return ret; - page_size = I915_GTT_PAGE_SIZE_2M; break; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: gvt_vgpu_err("GVT doesn't support 1GB entry\n"); @@ -1306,11 +1277,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, return -EINVAL; } - /* direct shadow */ - ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr); - if (ret) - return -ENXIO; - + /* Successfully shadowed a 4K or 2M page (without splitting). */ pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT); ppgtt_set_shadow_entry(spt, &se, index); return 0; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 0366a699baf5..97c6d3c53710 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -162,7 +162,6 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, if (npage == 0) base_page = cur_page; else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) { - gvt_vgpu_err("The pages are not continuous\n"); ret = -EINVAL; npage++; goto err; -- cgit From 16735297fdce2c7952dbe23288aad0d327fc444b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:15 -0700 Subject: drm/i915/gvt: Use an "unsigned long" to iterate over memslot gfns Use an "unsigned long" instead of an "int" when iterating over the gfns in a memslot. The number of pages in the memslot is tracked as an "unsigned long", e.g. KVMGT could theoretically break if a KVM memslot larger than 16TiB were deleted (2^32 * 4KiB). 
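To spell out the arithmetic behind that bound (an editorial aside, assuming 4KiB pages): 2^32 pages * 4KiB/page = 2^32 * 2^12 bytes = 2^44 bytes = 16TiB, and a signed 32-bit "int" iterator misbehaves even earlier, after 2^31 pages (8TiB).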
Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-10-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 97c6d3c53710..6f52886c4051 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1620,7 +1620,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_page_track_notifier_node *node) { - int i; + unsigned long i; gfn_t gfn; struct intel_vgpu *info = container_of(node, struct intel_vgpu, track_node); -- cgit From a90c367e5af63880008e21dd199dac839e0e9e0f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:16 -0700 Subject: drm/i915/gvt: Drop unused helper intel_vgpu_reset_gtt() Drop intel_vgpu_reset_gtt() as it no longer has any callers. In addition to eliminating dead code, this eliminates the last possible scenario where __kvmgt_protect_table_find() can be reached without holding vgpu_lock. Requiring vgpu_lock to be held when calling __kvmgt_protect_table_find() will allow protecting the gfn hash with vgpu_lock without too much fuss. No functional change intended. Fixes: ba25d977571e ("drm/i915/gvt: Do not destroy ppgtt_mm during vGPU D3->D0.") Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-11-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gtt.c | 18 ------------------ drivers/gpu/drm/i915/gvt/gtt.h | 1 - 2 files changed, 19 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index dc5ad590ad03..094fca9b0e73 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2817,24 +2817,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) ggtt_invalidate(gvt->gt); } -/** - * intel_vgpu_reset_gtt - reset the all GTT related status - * @vgpu: a vGPU - * - * This function is called from vfio core to reset reset all - * GTT related status, including GGTT, PPGTT, scratch page. - * - */ -void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) -{ - /* Shadow pages are only created when there is no page - * table tracking data, so remove page tracking data after - * removing the shadow pages.
- */ - intel_vgpu_destroy_all_ppgtt_mm(vgpu); - intel_vgpu_reset_ggtt(vgpu, true); -} - /** * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries * @gvt: intel gvt device diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index a3b0f59ec8bd..4cb183e06e95 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); int intel_gvt_init_gtt(struct intel_gvt *gvt); -void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu); void intel_gvt_clean_gtt(struct intel_gvt *gvt); struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, -- cgit From 3cca6b262876d30aae423431e8392d8b97b044fd Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:17 -0700 Subject: drm/i915/gvt: Protect gfn hash table with vgpu_lock Use vgpu_lock instead of KVM's mmu_lock to protect accesses to the hash table used to track which gfns are write-protected when shadowing the guest's GTT, and hoist the acquisition of vgpu_lock from intel_vgpu_page_track_handler() out to its sole caller, kvmgt_page_track_write(). This fixes a bug where kvmgt_page_track_write(), which doesn't hold kvm->mmu_lock, could race with intel_gvt_page_track_remove() and trigger a use-after-free. Fixing kvmgt_page_track_write() by taking kvm->mmu_lock is not an option as mmu_lock is a r/w spinlock, and intel_vgpu_page_track_handler() might sleep when acquiring vgpu->cache_lock deep down the callstack:

intel_vgpu_page_track_handler()
|
|-> page_track->handler / ppgtt_write_protection_handler()
    |
    |-> ppgtt_handle_guest_write_page_table_bytes()
        |
        |-> ppgtt_handle_guest_write_page_table()
            |
            |-> ppgtt_handle_guest_entry_removal()
                |
                |-> ppgtt_invalidate_pte()
                    |
                    |-> intel_gvt_dma_unmap_guest_page()
                        |
                        |-> mutex_lock(&vgpu->cache_lock);

Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-12-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/kvmgt.c | 39 +++++++++++++++++++++-------------- drivers/gpu/drm/i915/gvt/page_track.c | 10 ++------- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 6f52886c4051..034be0655daa 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -352,6 +352,8 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p, *res = NULL; + lockdep_assert_held(&info->vgpu_lock); + hash_for_each_possible(info->ptable, p, hnode, gfn) { if (gfn == p->gfn) { res = p; @@ -1553,6 +1555,9 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) return -ESRCH; + if (kvmgt_gfn_is_write_protected(info, gfn)) + return 0; + idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1561,16 +1566,12 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) } write_lock(&kvm->mmu_lock); - - if (kvmgt_gfn_is_write_protected(info, gfn)) - goto out; - kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); - kvmgt_protect_table_add(info, gfn); - -out: write_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); + + kvmgt_protect_table_add(info, gfn); return 0; } @@ -1583,6 +1584,9 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64
gfn) if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) return -ESRCH; + if (!kvmgt_gfn_is_write_protected(info, gfn)) + return 0; + idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1591,16 +1595,11 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) write_lock(&kvm->mmu_lock); - - if (!kvmgt_gfn_is_write_protected(info, gfn)) - goto out; - kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); - kvmgt_protect_table_del(info, gfn); - -out: write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); + + kvmgt_protect_table_del(info, gfn); return 0; } @@ -1611,9 +1610,13 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, struct intel_vgpu *info = container_of(node, struct intel_vgpu, track_node); + mutex_lock(&info->vgpu_lock); + if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) intel_vgpu_page_track_handler(info, gpa, (void *)val, len); + + mutex_unlock(&info->vgpu_lock); } static void kvmgt_page_track_flush_slot(struct kvm *kvm, @@ -1625,16 +1628,20 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, struct intel_vgpu *info = container_of(node, struct intel_vgpu, track_node); - write_lock(&kvm->mmu_lock); + mutex_lock(&info->vgpu_lock); + for (i = 0; i < slot->npages; i++) { gfn = slot->base_gfn + i; if (kvmgt_gfn_is_write_protected(info, gfn)) { + write_lock(&kvm->mmu_lock); kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); + write_unlock(&kvm->mmu_lock); + kvmgt_protect_table_del(info, gfn); } } - write_unlock(&kvm->mmu_lock); + mutex_unlock(&info->vgpu_lock); } void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index df34e73cba41..60a65435556d 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -162,13 +162,9 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, struct intel_vgpu_page_track *page_track; int ret = 0; - mutex_lock(&vgpu->vgpu_lock); - page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT); - if (!page_track) { - ret = -ENXIO; - goto out; - } + if (!page_track) + return -ENXIO; if (unlikely(vgpu->failsafe)) { /* Remove write protection to prevent furture traps. */ @@ -179,7 +175,5 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, gvt_err("guest page write error, gpa %llx\n", gpa); } -out: - mutex_unlock(&vgpu->vgpu_lock); return ret; } -- cgit From db0d70e61082a71e32147733ac082030cdbff0af Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:18 -0700 Subject: KVM: x86/mmu: Move kvm_arch_flush_shadow_{all,memslot}() to mmu.c Move x86's implementation of kvm_arch_flush_shadow_{all,memslot}() into mmu.c, and make kvm_mmu_zap_all() static as it was globally visible only for kvm_arch_flush_shadow_all(). This will allow refactoring kvm_arch_flush_shadow_memslot() to call kvm_mmu_zap_all_fast() directly without having to expose it outside of mmu.c. Keeping everything in mmu.c will also likely simplify supporting TDX, which intends to zap only the relevant SPTEs on memslot updates. No functional change intended.
Suggested-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-13-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu/mmu.c | 13 ++++++++++++- arch/x86/kvm/x86.c | 11 ----------- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e3c9ff4146fc..9e76c910e52c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1853,7 +1853,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *memslot); void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, const struct kvm_memory_slot *memslot); -void kvm_mmu_zap_all(struct kvm *kvm); void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 0420944da242..acb99a9c41ba 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -6678,7 +6678,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, */ } -void kvm_mmu_zap_all(struct kvm *kvm) +static void kvm_mmu_zap_all(struct kvm *kvm) { struct kvm_mmu_page *sp, *node; LIST_HEAD(invalid_list); @@ -6703,6 +6703,17 @@ restart: write_unlock(&kvm->mmu_lock); } +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_mmu_zap_all(kvm); +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + kvm_page_track_flush_slot(kvm, slot); +} + void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) { WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0b38a046690e..36f0ba21661d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12802,17 +12802,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, kvm_arch_free_memslot(kvm, old); } -void kvm_arch_flush_shadow_all(struct kvm *kvm) -{ - kvm_mmu_zap_all(kvm); -} - -void kvm_arch_flush_shadow_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ - kvm_page_track_flush_slot(kvm, slot); -} - static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) { return (is_guest_mode(vcpu) && -- cgit From eeb87272a364311dbfe1b10dd896c3e09a02cd03 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:19 -0700 Subject: KVM: x86/mmu: Don't rely on page-track mechanism to flush on memslot change Call kvm_mmu_zap_all_fast() directly when flushing a memslot instead of bouncing through the page-track mechanism. KVM (unfortunately) needs to zap and flush all page tables on memslot DELETE/MOVE irrespective of whether KVM is shadowing guest page tables. This will allow changing KVM to register a page-track notifier on the first shadow root allocation, and will also allow deleting the misguided kvm_page_track_flush_slot() hook itself once KVM-GT also moves to a different method for reacting to memslot changes. No functional change intended. 
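For illustration, a condensed trace of the memslot-flush flow before and after this change, pieced together from the removed kvm_mmu_invalidate_zap_pages_in_memslot() and the new direct call in the diff below (editorial, not verbatim kernel code):

    /* before: the zap bounced through the page-track notifier */
    kvm_arch_flush_shadow_memslot()
      -> kvm_page_track_flush_slot()
           -> track_flush_slot == kvm_mmu_invalidate_zap_pages_in_memslot()
                -> kvm_mmu_zap_all_fast()

    /* after: zap directly; the "flush" notification remains only for
     * external users until later patches delete the hook entirely
     */
    kvm_arch_flush_shadow_memslot()
      -> kvm_mmu_zap_all_fast()
      -> kvm_page_track_flush_slot()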
Cc: Yan Zhao Link: https://lore.kernel.org/r/20221110014821.1548347-2-seanjc@google.com Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-14-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index acb99a9c41ba..eabd5b13970d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -6159,13 +6159,6 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); } -static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot, - struct kvm_page_track_notifier_node *node) -{ - kvm_mmu_zap_all_fast(kvm); -} - int kvm_mmu_init_vm(struct kvm *kvm) { struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; @@ -6183,7 +6176,6 @@ int kvm_mmu_init_vm(struct kvm *kvm) } node->track_write = kvm_mmu_pte_write; - node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; kvm_page_track_register_notifier(kvm, node); kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache; @@ -6711,6 +6703,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { + kvm_mmu_zap_all_fast(kvm); + kvm_page_track_flush_slot(kvm, slot); } -- cgit From 932844462ae310493d0d21b1c5d7464d59702b4d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:20 -0700 Subject: KVM: x86/mmu: Don't bounce through page-track mechanism for guest PTEs Don't use the generic page-track mechanism to handle writes to guest PTEs in KVM's MMU. KVM's MMU needs access to information that should not be exposed to external page-track users, e.g. KVM needs (for some definitions of "need") the vCPU to query the current paging mode, whereas external users, i.e. KVMGT, have no ties to the current vCPU and so should never need the vCPU. Moving away from the page-track mechanism will allow dropping use of the page-track mechanism for KVM's own MMU, and will also allow simplifying and cleaning up the page-track APIs. Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-15-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu.h | 2 ++ arch/x86/kvm/mmu/mmu.c | 13 ++----------- arch/x86/kvm/mmu/page_track.c | 2 ++ 4 files changed, 6 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9e76c910e52c..93fdc42458cc 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1265,7 +1265,6 @@ struct kvm_arch { * create an NX huge page (without hanging the guest). 
*/ struct list_head possible_nx_huge_pages; - struct kvm_page_track_notifier_node mmu_sp_tracker; struct kvm_page_track_notifier_head track_notifier_head; /* * Protects marking pages unsync during page faults, as TDP MMU page diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 92d5a1924fc1..253fb2093d5d 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -121,6 +121,8 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu); +void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, + int bytes); static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index eabd5b13970d..28385642eb98 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5635,9 +5635,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) return spte; } -static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *new, int bytes, - struct kvm_page_track_notifier_node *node) +void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, + int bytes) { gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm_mmu_page *sp; @@ -6161,7 +6160,6 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) int kvm_mmu_init_vm(struct kvm *kvm) { - struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; int r; INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); @@ -6175,9 +6173,6 @@ int kvm_mmu_init_vm(struct kvm *kvm) return r; } - node->track_write = kvm_mmu_pte_write; - kvm_page_track_register_notifier(kvm, node); - kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache; kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO; @@ -6198,10 +6193,6 @@ static void mmu_free_vm_memory_caches(struct kvm *kvm) void kvm_mmu_uninit_vm(struct kvm *kvm) { - struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; - - kvm_page_track_unregister_notifier(kvm, node); - if (tdp_mmu_enabled) kvm_mmu_uninit_tdp_mmu(kvm); diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index fd16918b3a7a..bca0994ea157 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -274,6 +274,8 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, if (n->track_write) n->track_write(vcpu, gpa, new, bytes, n); srcu_read_unlock(&head->track_srcu, idx); + + kvm_mmu_track_write(vcpu, gpa, new, bytes); } /* -- cgit From b271e17defb07560399734c7aefbdd0fc961c601 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:21 -0700 Subject: KVM: drm/i915/gvt: Drop @vcpu from KVM's ->track_write() hook Drop @vcpu from KVM's ->track_write() hook provided for external users of the page-track APIs now that KVM itself doesn't use the page-track mechanism. 
Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-16-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 5 ++--- arch/x86/kvm/mmu/page_track.c | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 10 ++++------ 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index eb186bc57f6a..8c4d216e3b2b 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -26,14 +26,13 @@ struct kvm_page_track_notifier_node { * It is called when guest is writing the write-tracked page * and write emulation is finished at that time. * - * @vcpu: the vcpu where the write access happened. * @gpa: the physical address written by guest. * @new: the data was written to the address. * @bytes: the written length. * @node: this node */ - void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, - int bytes, struct kvm_page_track_notifier_node *node); + void (*track_write)(gpa_t gpa, const u8 *new, int bytes, + struct kvm_page_track_notifier_node *node); /* * It is called when memory slot is being moved or removed * users can drop write-protection for the pages in that memory slot diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index bca0994ea157..772be81f97cc 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -272,7 +272,7 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, srcu_read_lock_held(&head->track_srcu)) if (n->track_write) - n->track_write(vcpu, gpa, new, bytes, n); + n->track_write(gpa, new, bytes, n); srcu_read_unlock(&head->track_srcu, idx); kvm_mmu_track_write(vcpu, gpa, new, bytes); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 034be0655daa..e9276500435d 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -106,9 +106,8 @@ struct gvt_dma { #define vfio_dev_to_vgpu(vfio_dev) \ container_of((vfio_dev), struct intel_vgpu, vfio_device) -static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *val, int len, - struct kvm_page_track_notifier_node *node); +static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len, + struct kvm_page_track_notifier_node *node); static void kvmgt_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_page_track_notifier_node *node); @@ -1603,9 +1602,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) return 0; } -static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *val, int len, - struct kvm_page_track_notifier_node *node) +static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len, + struct kvm_page_track_notifier_node *node) { struct intel_vgpu *info = container_of(node, struct intel_vgpu, track_node); -- cgit From c70934e0ab2d2c4bb16bf70c3e3db8c064a1761b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:22 -0700 Subject: KVM: x86: Reject memslot MOVE operations if KVMGT is attached Disallow moving memslots if the VM has external page-track users, i.e. if KVMGT is being used to expose a virtual GPU to the guest, as KVMGT doesn't correctly handle moving memory regions. Note, this is potential ABI breakage! E.g. 
userspace could move regions that aren't shadowed by KVMGT without harming the guest. However, the only known user of KVMGT is QEMU, and QEMU doesn't move generic memory regions. KVM's own support for moving memory regions was also broken for multiple years (albeit for an edge case, but arguably moving RAM is itself an edge case), e.g. see commit edd4fa37baa6 ("KVM: x86: Allocate new rmap and large page tracking when moving memslot"). Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-17-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 3 +++ arch/x86/kvm/mmu/page_track.c | 5 +++++ arch/x86/kvm/x86.c | 7 +++++++ 3 files changed, 15 insertions(+) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 8c4d216e3b2b..f744682648e7 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -75,4 +75,7 @@ kvm_page_track_unregister_notifier(struct kvm *kvm, void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes); void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot); + +bool kvm_page_track_has_external_user(struct kvm *kvm); + #endif diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 772be81f97cc..cfd0b8092d06 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -303,3 +303,8 @@ void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot) n->track_flush_slot(kvm, slot, n); srcu_read_unlock(&head->track_srcu, idx); } + +bool kvm_page_track_has_external_user(struct kvm *kvm) +{ + return !hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list); +} diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 36f0ba21661d..161e97e6caa9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12632,6 +12632,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { + /* + * KVM doesn't support moving memslots when there are external page + * trackers attached to the VM, i.e. if KVMGT is in use. + */ + if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm)) + return -EINVAL; + if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) { if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) return -EINVAL; -- cgit From 2ee05a4c275a54d9b64599bcbf7117a9490c9e43 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:23 -0700 Subject: drm/i915/gvt: Don't bother removing write-protection on to-be-deleted slot When handling a slot "flush", don't call back into KVM to drop write protection for gfns in the slot. Now that KVM rejects attempts to move memory slots while KVMGT is attached, the only time a slot is "flushed" is when it's being removed, i.e. the memslot and all its write-tracking metadata is about to be deleted. 
Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-18-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/kvmgt.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index e9276500435d..3ea3cb9eb599 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1630,14 +1630,8 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, for (i = 0; i < slot->npages; i++) { gfn = slot->base_gfn + i; - if (kvmgt_gfn_is_write_protected(info, gfn)) { - write_lock(&kvm->mmu_lock); - kvm_slot_page_track_remove_page(kvm, slot, gfn, - KVM_PAGE_TRACK_WRITE); - write_unlock(&kvm->mmu_lock); - + if (kvmgt_gfn_is_write_protected(info, gfn)) kvmgt_protect_table_del(info, gfn); - } } mutex_unlock(&info->vgpu_lock); } -- cgit From b83ab124ded3c17f911668522a2e916ca8d3c523 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Fri, 28 Jul 2023 18:35:24 -0700 Subject: KVM: x86: Add a new page-track hook to handle memslot deletion Add a new page-track hook, track_remove_region(), that is called when a memslot DELETE operation is about to be committed. The "remove" hook will be used by KVMGT and will effectively replace the existing track_flush_slot() altogether now that KVM itself doesn't rely on the "flush" hook either. The "flush" hook is flawed as it's invoked before the memslot operation is guaranteed to succeed, i.e. KVM might ultimately keep the existing memslot without notifying external page track users, a.k.a. KVMGT. In practice, this can't currently happen on x86, but there are no guarantees that won't change in the future, not to mention that "flush" does a very poor job of describing what is happening. Pass in the gfn+nr_pages instead of the slot itself so external users, i.e. KVMGT, don't need to be exposed to KVM internals (memslots). This will help set the stage for additional cleanups to the page-track APIs. Opportunistically align the existing srcu_read_lock_held() usage so that the new case doesn't stand out like a sore thumb (and not aligning the new code makes bots unhappy). Cc: Zhenyu Wang Tested-by: Yongwei Ma Signed-off-by: Yan Zhao Co-developed-by: Sean Christopherson Link: https://lore.kernel.org/r/20230729013535.1070024-19-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 12 ++++++++++++ arch/x86/kvm/mmu/page_track.c | 27 +++++++++++++++++++++++++-- arch/x86/kvm/x86.c | 3 +++ 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index f744682648e7..cfd36c22b467 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -43,6 +43,17 @@ struct kvm_page_track_notifier_node { */ void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_page_track_notifier_node *node); + + /* + * Invoked when a memory region is removed from the guest. Or in KVM + * terms, when a memslot is deleted.
+ * + * @gfn: base gfn of the region being removed + * @nr_pages: number of pages in the to-be-removed region + * @node: this node + */ + void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages, + struct kvm_page_track_notifier_node *node); }; int kvm_page_track_init(struct kvm *kvm); @@ -75,6 +86,7 @@ kvm_page_track_unregister_notifier(struct kvm *kvm, void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes); void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot); +void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot); bool kvm_page_track_has_external_user(struct kvm *kvm); diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index cfd0b8092d06..640e383b5252 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -270,7 +270,7 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, idx = srcu_read_lock(&head->track_srcu); hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, - srcu_read_lock_held(&head->track_srcu)) + srcu_read_lock_held(&head->track_srcu)) if (n->track_write) n->track_write(gpa, new, bytes, n); srcu_read_unlock(&head->track_srcu, idx); @@ -298,12 +298,35 @@ void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot) idx = srcu_read_lock(&head->track_srcu); hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, - srcu_read_lock_held(&head->track_srcu)) + srcu_read_lock_held(&head->track_srcu)) if (n->track_flush_slot) n->track_flush_slot(kvm, slot, n); srcu_read_unlock(&head->track_srcu, idx); } +/* + * Notify external page track nodes that a memory region is being removed from + * the VM, e.g. so that users can free any associated metadata. + */ +void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + struct kvm_page_track_notifier_head *head; + struct kvm_page_track_notifier_node *n; + int idx; + + head = &kvm->arch.track_notifier_head; + + if (hlist_empty(&head->track_notifier_list)) + return; + + idx = srcu_read_lock(&head->track_srcu); + hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, + srcu_read_lock_held(&head->track_srcu)) + if (n->track_remove_region) + n->track_remove_region(slot->base_gfn, slot->npages, n); + srcu_read_unlock(&head->track_srcu, idx); +} + bool kvm_page_track_has_external_user(struct kvm *kvm) { return !hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 161e97e6caa9..792846447593 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12793,6 +12793,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { + if (change == KVM_MR_DELETE) + kvm_page_track_delete_slot(kvm, old); + if (!kvm->arch.n_requested_mmu_pages && (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) { unsigned long nr_mmu_pages; -- cgit From c15fcf12ffb37be314752202fb02cf16138e66fb Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Fri, 28 Jul 2023 18:35:25 -0700 Subject: drm/i915/gvt: switch from ->track_flush_slot() to ->track_remove_region() Switch from the poorly named and flawed ->track_flush_slot() to the newly introduced ->track_remove_region(). From KVMGT's perspective, the two hooks are functionally equivalent, the only difference being that ->track_remove_region() is called only when KVM is 100% certain the memory region will be removed, i.e. is invoked slightly later in KVM's memslot modification flow. 
Cc: Zhenyu Wang Suggested-by: Sean Christopherson Signed-off-by: Yan Zhao [sean: handle name change, massage changelog, rebase] Tested-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-20-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/kvmgt.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 3ea3cb9eb599..3f2327455d85 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -108,9 +108,8 @@ struct gvt_dma { static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node); -static void kvmgt_page_track_flush_slot(struct kvm *kvm, - struct kvm_memory_slot *slot, - struct kvm_page_track_notifier_node *node); +static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, + struct kvm_page_track_notifier_node *node); static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf) { @@ -666,7 +665,7 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) return -EEXIST; vgpu->track_node.track_write = kvmgt_page_track_write; - vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region; kvm_get_kvm(vgpu->vfio_device.kvm); kvm_page_track_register_notifier(vgpu->vfio_device.kvm, &vgpu->track_node); @@ -1617,22 +1616,20 @@ static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len, mutex_unlock(&info->vgpu_lock); } -static void kvmgt_page_track_flush_slot(struct kvm *kvm, - struct kvm_memory_slot *slot, - struct kvm_page_track_notifier_node *node) +static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, + struct kvm_page_track_notifier_node *node) { unsigned long i; - gfn_t gfn; struct intel_vgpu *info = container_of(node, struct intel_vgpu, track_node); mutex_lock(&info->vgpu_lock); - for (i = 0; i < slot->npages; i++) { - gfn = slot->base_gfn + i; - if (kvmgt_gfn_is_write_protected(info, gfn)) - kvmgt_protect_table_del(info, gfn); + for (i = 0; i < nr_pages; i++) { + if (kvmgt_gfn_is_write_protected(info, gfn + i)) + kvmgt_protect_table_del(info, gfn + i); } + mutex_unlock(&info->vgpu_lock); } -- cgit From d104d5bbbc2de62fafe0e391dbc7b1e99a58722e Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Fri, 28 Jul 2023 18:35:26 -0700 Subject: KVM: x86: Remove the unused page-track hook track_flush_slot() Remove ->track_flush_slot(); there are no longer any users and it's unlikely a "flush" hook will ever be the correct API to provide to an external page-track user.
Cc: Zhenyu Wang Suggested-by: Sean Christopherson Signed-off-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-21-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 11 ----------- arch/x86/kvm/mmu/mmu.c | 2 -- arch/x86/kvm/mmu/page_track.c | 26 -------------------------- 3 files changed, 39 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index cfd36c22b467..5c348ffdc194 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -33,16 +33,6 @@ struct kvm_page_track_notifier_node { */ void (*track_write)(gpa_t gpa, const u8 *new, int bytes, struct kvm_page_track_notifier_node *node); - /* - * It is called when memory slot is being moved or removed - * users can drop write-protection for the pages in that memory slot - * - * @kvm: the kvm where memory slot being moved or removed - * @slot: the memory slot being moved or removed - * @node: this node - */ - void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_page_track_notifier_node *node); /* * Invoked when a memory region is removed from the guest. Or in KVM @@ -85,7 +75,6 @@ kvm_page_track_unregister_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n); void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes); -void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot); bool kvm_page_track_has_external_user(struct kvm *kvm); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 28385642eb98..90a528dca883 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -6695,8 +6695,6 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_zap_all_fast(kvm); - - kvm_page_track_flush_slot(kvm, slot); } void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 640e383b5252..98fb96357b84 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -278,32 +278,6 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, kvm_mmu_track_write(vcpu, gpa, new, bytes); } -/* - * Notify the node that memory slot is being removed or moved so that it can - * drop write-protection for the pages in the memory slot. - * - * The node should figure out it has any write-protected pages in this slot - * by itself. - */ -void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot) -{ - struct kvm_page_track_notifier_head *head; - struct kvm_page_track_notifier_node *n; - int idx; - - head = &kvm->arch.track_notifier_head; - - if (hlist_empty(&head->track_notifier_list)) - return; - - idx = srcu_read_lock(&head->track_srcu); - hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, - srcu_read_lock_held(&head->track_srcu)) - if (n->track_flush_slot) - n->track_flush_slot(kvm, slot, n); - srcu_read_unlock(&head->track_srcu, idx); -} - /* * Notify external page track nodes that a memory region is being removed from * the VM, e.g. so that users can free any associated metadata. 
-- cgit From 58ea7cf700cae441fd41d17e6f8092a4f9e2e32b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:27 -0700 Subject: KVM: x86/mmu: Move KVM-only page-track declarations to internal header Bury the declaration of the page-track helpers that are intended only for internal KVM use in a "private" header. In addition to guarding against unwanted usage of the internal-only helpers, dropping their definitions avoids exposing other structures that should be KVM-internal, e.g. for memslots. This is a baby step toward making kvm_host.h a KVM-internal header in the very distant future. Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-22-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 21 ++------------------- arch/x86/kvm/mmu/mmu.c | 3 ++- arch/x86/kvm/mmu/page_track.c | 8 +------- arch/x86/kvm/mmu/page_track.h | 33 +++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 1 + 5 files changed, 39 insertions(+), 27 deletions(-) create mode 100644 arch/x86/kvm/mmu/page_track.h diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 5c348ffdc194..76c0070dfe2a 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_KVM_PAGE_TRACK_H #define _ASM_X86_KVM_PAGE_TRACK_H +#include + enum kvm_page_track_mode { KVM_PAGE_TRACK_WRITE, KVM_PAGE_TRACK_MAX, @@ -46,26 +48,12 @@ struct kvm_page_track_notifier_node { struct kvm_page_track_notifier_node *node); }; -int kvm_page_track_init(struct kvm *kvm); -void kvm_page_track_cleanup(struct kvm *kvm); - -bool kvm_page_track_write_tracking_enabled(struct kvm *kvm); -int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot); - -void kvm_page_track_free_memslot(struct kvm_memory_slot *slot); -int kvm_page_track_create_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot, - unsigned long npages); - void kvm_slot_page_track_add_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); -bool kvm_slot_page_track_is_active(struct kvm *kvm, - const struct kvm_memory_slot *slot, - gfn_t gfn, enum kvm_page_track_mode mode); void kvm_page_track_register_notifier(struct kvm *kvm, @@ -73,10 +61,5 @@ kvm_page_track_register_notifier(struct kvm *kvm, void kvm_page_track_unregister_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n); -void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, - int bytes); -void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot); - -bool kvm_page_track_has_external_user(struct kvm *kvm); #endif diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 90a528dca883..14b5c74a9247 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -25,6 +25,7 @@ #include "kvm_cache_regs.h" #include "smm.h" #include "kvm_emulate.h" +#include "page_track.h" #include "cpuid.h" #include "spte.h" @@ -53,7 +54,7 @@ #include #include #include -#include + #include "trace.h" extern bool itlb_multihit_kvm_mitigation; diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 98fb96357b84..ba3261df9b63 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -15,10 +15,9 @@ #include #include -#include - #include "mmu.h" #include 
"mmu_internal.h" +#include "page_track.h" bool kvm_page_track_write_tracking_enabled(struct kvm *kvm) { @@ -300,8 +299,3 @@ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot) n->track_remove_region(slot->base_gfn, slot->npages, n); srcu_read_unlock(&head->track_srcu, idx); } - -bool kvm_page_track_has_external_user(struct kvm *kvm) -{ - return !hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list); -} diff --git a/arch/x86/kvm/mmu/page_track.h b/arch/x86/kvm/mmu/page_track.h new file mode 100644 index 000000000000..cc1c8094bd73 --- /dev/null +++ b/arch/x86/kvm/mmu/page_track.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_PAGE_TRACK_H +#define __KVM_X86_PAGE_TRACK_H + +#include + +#include + +int kvm_page_track_init(struct kvm *kvm); +void kvm_page_track_cleanup(struct kvm *kvm); + +bool kvm_page_track_write_tracking_enabled(struct kvm *kvm); +int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot); + +void kvm_page_track_free_memslot(struct kvm_memory_slot *slot); +int kvm_page_track_create_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, + unsigned long npages); + +bool kvm_slot_page_track_is_active(struct kvm *kvm, + const struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode); + +void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, + int bytes); +void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot); + +static inline bool kvm_page_track_has_external_user(struct kvm *kvm) +{ + return !hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list); +} + +#endif /* __KVM_X86_PAGE_TRACK_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 792846447593..6c9c81e82e65 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -25,6 +25,7 @@ #include "tss.h" #include "kvm_cache_regs.h" #include "kvm_emulate.h" +#include "mmu/page_track.h" #include "x86.h" #include "cpuid.h" #include "pmu.h" -- cgit From e998fb1a30131fdc7c734bc7422000b41146b631 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:28 -0700 Subject: KVM: x86/mmu: Use page-track notifiers iff there are external users Disable the page-track notifier code at compile time if there are no external users, i.e. if CONFIG_KVM_EXTERNAL_WRITE_TRACKING=n. KVM itself now hooks emulated writes directly instead of relying on the page-track mechanism. Provide a stub for "struct kvm_page_track_notifier_node" so that including headers directly from the command line, e.g. for testing include guards, doesn't fail due to a struct having an incomplete type. Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-23-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/include/asm/kvm_page_track.h | 22 +++++++++++++++------- arch/x86/kvm/mmu/page_track.c | 10 +++++----- arch/x86/kvm/mmu/page_track.h | 29 +++++++++++++++++++++++++---- 4 files changed, 47 insertions(+), 16 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 93fdc42458cc..29ad3f0a126f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1265,7 +1265,9 @@ struct kvm_arch { * create an NX huge page (without hanging the guest). 
*/ struct list_head possible_nx_huge_pages; +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING struct kvm_page_track_notifier_head track_notifier_head; +#endif /* * Protects marking pages unsync during page faults, as TDP MMU page * faults only take mmu_lock for read. For simplicity, the unsync diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 76c0070dfe2a..61adb07b5927 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -9,6 +9,14 @@ enum kvm_page_track_mode { KVM_PAGE_TRACK_MAX, }; +void kvm_slot_page_track_add_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode); +void kvm_slot_page_track_remove_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode); + +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING /* * The notifier represented by @kvm_page_track_notifier_node is linked into * the head which will be notified when guest is triggering the track event. @@ -48,18 +56,18 @@ struct kvm_page_track_notifier_node { struct kvm_page_track_notifier_node *node); }; -void kvm_slot_page_track_add_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode); -void kvm_slot_page_track_remove_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode); - void kvm_page_track_register_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n); void kvm_page_track_unregister_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n); +#else +/* + * Allow defining a node in a structure even if page tracking is disabled, e.g. + * to play nice with testing headers via direct inclusion from the command line. + */ +struct kvm_page_track_notifier_node {}; +#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */ #endif diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index ba3261df9b63..108075001f5c 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -194,6 +194,7 @@ bool kvm_slot_page_track_is_active(struct kvm *kvm, return !!READ_ONCE(slot->arch.gfn_track[mode][index]); } +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING void kvm_page_track_cleanup(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; @@ -255,14 +256,13 @@ EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); * The node should figure out if the written page is the one that node is * interested in by itself. 
*/ -void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, - int bytes) +void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes) { struct kvm_page_track_notifier_head *head; struct kvm_page_track_notifier_node *n; int idx; - head = &vcpu->kvm->arch.track_notifier_head; + head = &kvm->arch.track_notifier_head; if (hlist_empty(&head->track_notifier_list)) return; @@ -273,8 +273,6 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, if (n->track_write) n->track_write(gpa, new, bytes, n); srcu_read_unlock(&head->track_srcu, idx); - - kvm_mmu_track_write(vcpu, gpa, new, bytes); } /* @@ -299,3 +297,5 @@ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot) n->track_remove_region(slot->base_gfn, slot->npages, n); srcu_read_unlock(&head->track_srcu, idx); } + +#endif diff --git a/arch/x86/kvm/mmu/page_track.h b/arch/x86/kvm/mmu/page_track.h index cc1c8094bd73..ac48c668937a 100644 --- a/arch/x86/kvm/mmu/page_track.h +++ b/arch/x86/kvm/mmu/page_track.h @@ -6,8 +6,6 @@ #include -int kvm_page_track_init(struct kvm *kvm); -void kvm_page_track_cleanup(struct kvm *kvm); bool kvm_page_track_write_tracking_enabled(struct kvm *kvm); int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot); @@ -21,13 +19,36 @@ bool kvm_slot_page_track_is_active(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); -void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, - int bytes); +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING +int kvm_page_track_init(struct kvm *kvm); +void kvm_page_track_cleanup(struct kvm *kvm); + +void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes); void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot); static inline bool kvm_page_track_has_external_user(struct kvm *kvm) { return !hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list); } +#else +static inline int kvm_page_track_init(struct kvm *kvm) { return 0; } +static inline void kvm_page_track_cleanup(struct kvm *kvm) { } + +static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, + const u8 *new, int bytes) { } +static inline void kvm_page_track_delete_slot(struct kvm *kvm, + struct kvm_memory_slot *slot) { } + +static inline bool kvm_page_track_has_external_user(struct kvm *kvm) { return false; } + +#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */ + +static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *new, int bytes) +{ + __kvm_page_track_write(vcpu->kvm, gpa, new, bytes); + + kvm_mmu_track_write(vcpu, gpa, new, bytes); +} #endif /* __KVM_X86_PAGE_TRACK_H */ -- cgit From 338068b5bec4dfa11cf50eb8c1839d1e27749395 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:29 -0700 Subject: KVM: x86/mmu: Drop infrastructure for multiple page-track modes Drop "support" for multiple page-track modes, as there is no evidence that array-based and refcounted metadata is the optimal solution for other modes, nor is there any evidence that other use cases, e.g. for access-tracking, will be a good fit for the page-track machinery in general. E.g. one potential use case of access-tracking would be to prevent guest access to poisoned memory (from the guest's perspective). In that case, the number of poisoned pages is likely to be a very small percentage of the guest memory, and there is no need to reference count the number of access-tracking users, i.e. 
expanding gfn_track[] for a new mode would be grossly inefficient. And for poisoned memory, host userspace would also likely want to trap accesses, e.g. to inject #MC into the guest, and that isn't currently supported by the page-track framework. A better alternative for that poisoned page use case is likely a variation of the proposed per-gfn attributes overlay (linked), which would allow efficiently tracking the sparse set of poisoned pages, and by default would exit to userspace on access. Link: https://lore.kernel.org/all/Y2WB48kD0J4VGynX@google.com Cc: Ben Gardon Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-24-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 12 ++-- arch/x86/include/asm/kvm_page_track.h | 11 +--- arch/x86/kvm/mmu/mmu.c | 14 ++--- arch/x86/kvm/mmu/page_track.c | 106 ++++++++++------------------------ arch/x86/kvm/mmu/page_track.h | 3 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 4 +- 6 files changed, 48 insertions(+), 102 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 29ad3f0a126f..1a4def36d5bb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -288,13 +288,13 @@ struct kvm_kernel_irq_routing_entry; * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page * also includes TDP pages) to determine whether or not a page can be used in * the given MMU context. This is a subset of the overall kvm_cpu_role to - * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating - * 2 bytes per gfn instead of 4 bytes per gfn. + * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows + * allocating 2 bytes per gfn instead of 4 bytes per gfn. * * Upper-level shadow pages having gptes are tracked for write-protection via - * gfn_track. As above, gfn_track is a 16 bit counter, so KVM must not create - * more than 2^16-1 upper-level shadow pages at a single gfn, otherwise - * gfn_track will overflow and explosions will ensure. + * gfn_write_track. As above, gfn_write_track is a 16 bit counter, so KVM must + * not create more than 2^16-1 upper-level shadow pages at a single gfn, + * otherwise gfn_write_track will overflow and explosions will ensue. * * A unique shadow page (SP) for a gfn is created if and only if an existing SP * cannot be reused. 
The ability to reuse a SP is tracked by its role, which @@ -1023,7 +1023,7 @@ struct kvm_lpage_info { struct kvm_arch_memory_slot { struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES]; struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; - unsigned short *gfn_track[KVM_PAGE_TRACK_MAX]; + unsigned short *gfn_write_track; }; /* diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 61adb07b5927..9e4ee26d1779 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -4,17 +4,10 @@ #include -enum kvm_page_track_mode { - KVM_PAGE_TRACK_WRITE, - KVM_PAGE_TRACK_MAX, -}; - void kvm_slot_page_track_add_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode); + struct kvm_memory_slot *slot, gfn_t gfn); void kvm_slot_page_track_remove_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode); + struct kvm_memory_slot *slot, gfn_t gfn); #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING /* diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 14b5c74a9247..2d76ead9ddac 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -831,8 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) /* the non-leaf shadow pages are keeping readonly. */ if (sp->role.level > PG_LEVEL_4K) - return kvm_slot_page_track_add_page(kvm, slot, gfn, - KVM_PAGE_TRACK_WRITE); + return kvm_slot_page_track_add_page(kvm, slot, gfn); kvm_mmu_gfn_disallow_lpage(slot, gfn); @@ -878,8 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); if (sp->role.level > PG_LEVEL_4K) - return kvm_slot_page_track_remove_page(kvm, slot, gfn, - KVM_PAGE_TRACK_WRITE); + return kvm_slot_page_track_remove_page(kvm, slot, gfn); kvm_mmu_gfn_allow_lpage(slot, gfn); } @@ -2809,7 +2807,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, * track machinery is used to write-protect upper-level shadow pages, * i.e. this guards the role.level == 4K assertion below! */ - if (kvm_slot_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(kvm, slot, gfn)) return -EPERM; /* @@ -4203,7 +4201,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, * guest is writing the page which is write tracked which can * not be fixed by page fault handler. */ - if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn)) return true; return false; @@ -5422,8 +5420,8 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) * physical address properties) in a single VM would require tracking * all relevant CPUID information in kvm_mmu_page_role. That is very * undesirable as it would increase the memory requirements for - * gfn_track (see struct kvm_mmu_page_role comments). For now that - * problem is swept under the rug; KVM's CPUID API is horrific and + * gfn_write_track (see struct kvm_mmu_page_role comments). For now + * that problem is swept under the rug; KVM's CPUID API is horrific and * it's all but impossible to solve it without introducing a new API. 
*/ vcpu->arch.root_mmu.root_role.word = 0; diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 108075001f5c..9d0b1a50f2fd 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -27,76 +27,50 @@ bool kvm_page_track_write_tracking_enabled(struct kvm *kvm) void kvm_page_track_free_memslot(struct kvm_memory_slot *slot) { - int i; - - for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { - kvfree(slot->arch.gfn_track[i]); - slot->arch.gfn_track[i] = NULL; - } + kvfree(slot->arch.gfn_write_track); + slot->arch.gfn_write_track = NULL; } -int kvm_page_track_create_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot, - unsigned long npages) +static int __kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot, + unsigned long npages) { - int i; - - for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { - if (i == KVM_PAGE_TRACK_WRITE && - !kvm_page_track_write_tracking_enabled(kvm)) - continue; - - slot->arch.gfn_track[i] = - __vcalloc(npages, sizeof(*slot->arch.gfn_track[i]), - GFP_KERNEL_ACCOUNT); - if (!slot->arch.gfn_track[i]) - goto track_free; - } + const size_t size = sizeof(*slot->arch.gfn_write_track); - return 0; + if (!slot->arch.gfn_write_track) + slot->arch.gfn_write_track = __vcalloc(npages, size, + GFP_KERNEL_ACCOUNT); -track_free: - kvm_page_track_free_memslot(slot); - return -ENOMEM; + return slot->arch.gfn_write_track ? 0 : -ENOMEM; } -static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode) +int kvm_page_track_create_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, + unsigned long npages) { - if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX) - return false; + if (!kvm_page_track_write_tracking_enabled(kvm)) + return 0; - return true; + return __kvm_page_track_write_tracking_alloc(slot, npages); } int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot) { - unsigned short *gfn_track; - - if (slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE]) - return 0; - - gfn_track = __vcalloc(slot->npages, sizeof(*gfn_track), - GFP_KERNEL_ACCOUNT); - if (gfn_track == NULL) - return -ENOMEM; - - slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE] = gfn_track; - return 0; + return __kvm_page_track_write_tracking_alloc(slot, slot->npages); } -static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode, short count) +static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, + short count) { int index, val; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); - val = slot->arch.gfn_track[mode][index]; + val = slot->arch.gfn_write_track[index]; if (WARN_ON_ONCE(val + count < 0 || val + count > USHRT_MAX)) return; - slot->arch.gfn_track[mode][index] += count; + slot->arch.gfn_write_track[index] += count; } /* @@ -109,21 +83,14 @@ static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, * @kvm: the guest instance we are interested in. * @slot: the @gfn belongs to. * @gfn: the guest page. - * @mode: tracking mode, currently only write track is supported. 
*/ void kvm_slot_page_track_add_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode) + struct kvm_memory_slot *slot, gfn_t gfn) { - - if (WARN_ON_ONCE(!page_track_mode_is_valid(mode))) + if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) return; - if (WARN_ON_ONCE(mode == KVM_PAGE_TRACK_WRITE && - !kvm_page_track_write_tracking_enabled(kvm))) - return; - - update_gfn_track(slot, gfn, mode, 1); + update_gfn_write_track(slot, gfn, 1); /* * new track stops large page mapping for the @@ -131,9 +98,8 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, */ kvm_mmu_gfn_disallow_lpage(slot, gfn); - if (mode == KVM_PAGE_TRACK_WRITE) - if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) - kvm_flush_remote_tlbs(kvm); + if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) + kvm_flush_remote_tlbs(kvm); } EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page); @@ -148,20 +114,14 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page); * @kvm: the guest instance we are interested in. * @slot: the @gfn belongs to. * @gfn: the guest page. - * @mode: tracking mode, currently only write track is supported. */ void kvm_slot_page_track_remove_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode) + struct kvm_memory_slot *slot, gfn_t gfn) { - if (WARN_ON_ONCE(!page_track_mode_is_valid(mode))) - return; - - if (WARN_ON_ONCE(mode == KVM_PAGE_TRACK_WRITE && - !kvm_page_track_write_tracking_enabled(kvm))) + if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) return; - update_gfn_track(slot, gfn, mode, -1); + update_gfn_write_track(slot, gfn, -1); /* * allow large page mapping for the tracked page @@ -176,22 +136,18 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); */ bool kvm_slot_page_track_is_active(struct kvm *kvm, const struct kvm_memory_slot *slot, - gfn_t gfn, enum kvm_page_track_mode mode) + gfn_t gfn) { int index; - if (WARN_ON_ONCE(!page_track_mode_is_valid(mode))) - return false; - if (!slot) return false; - if (mode == KVM_PAGE_TRACK_WRITE && - !kvm_page_track_write_tracking_enabled(kvm)) + if (!kvm_page_track_write_tracking_enabled(kvm)) return false; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); - return !!READ_ONCE(slot->arch.gfn_track[mode][index]); + return !!READ_ONCE(slot->arch.gfn_write_track[index]); } #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING diff --git a/arch/x86/kvm/mmu/page_track.h b/arch/x86/kvm/mmu/page_track.h index ac48c668937a..48a5377395dc 100644 --- a/arch/x86/kvm/mmu/page_track.h +++ b/arch/x86/kvm/mmu/page_track.h @@ -16,8 +16,7 @@ int kvm_page_track_create_memslot(struct kvm *kvm, unsigned long npages); bool kvm_slot_page_track_is_active(struct kvm *kvm, - const struct kvm_memory_slot *slot, - gfn_t gfn, enum kvm_page_track_mode mode); + const struct kvm_memory_slot *slot, gfn_t gfn); #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING int kvm_page_track_init(struct kvm *kvm); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 3f2327455d85..e71182b8a3f2 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1564,7 +1564,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) } write_lock(&kvm->mmu_lock); - kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); + kvm_slot_page_track_add_page(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); @@ -1593,7 +1593,7 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) } 
write_lock(&kvm->mmu_lock); - kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); + kvm_slot_page_track_remove_page(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); -- cgit From 7b574863e71833b5a4907245c1d95e76d507b984 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:30 -0700 Subject: KVM: x86/mmu: Rename page-track APIs to reflect the new reality Rename the page-track APIs to capture that they're all about tracking writes, now that the facade of supporting multiple modes is gone. Opportunistically replace "slot" with "gfn" in anticipation of removing the @slot param from the external APIs. No functional change intended. Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-25-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 8 ++++---- arch/x86/kvm/mmu/mmu.c | 8 ++++---- arch/x86/kvm/mmu/page_track.c | 20 +++++++++----------- arch/x86/kvm/mmu/page_track.h | 4 ++-- drivers/gpu/drm/i915/gvt/kvmgt.c | 4 ++-- 5 files changed, 21 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 9e4ee26d1779..f5c1db36cdb7 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -4,10 +4,10 @@ #include -void kvm_slot_page_track_add_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn); -void kvm_slot_page_track_remove_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn); +void kvm_write_track_add_gfn(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn); +void kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn); #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING /* diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 2d76ead9ddac..cc9a1e627d7e 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -831,7 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) /* the non-leaf shadow pages are keeping readonly. */ if (sp->role.level > PG_LEVEL_4K) - return kvm_slot_page_track_add_page(kvm, slot, gfn); + return kvm_write_track_add_gfn(kvm, slot, gfn); kvm_mmu_gfn_disallow_lpage(slot, gfn); @@ -877,7 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); if (sp->role.level > PG_LEVEL_4K) - return kvm_slot_page_track_remove_page(kvm, slot, gfn); + return kvm_write_track_remove_gfn(kvm, slot, gfn); kvm_mmu_gfn_allow_lpage(slot, gfn); } @@ -2807,7 +2807,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, * track machinery is used to write-protect upper-level shadow pages, * i.e. this guards the role.level == 4K assertion below! */ - if (kvm_slot_page_track_is_active(kvm, slot, gfn)) + if (kvm_gfn_is_write_tracked(kvm, slot, gfn)) return -EPERM; /* @@ -4201,7 +4201,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, * guest is writing the page which is write tracked which can * not be fixed by page fault handler. 
*/ - if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn)) + if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn)) return true; return false; diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 9d0b1a50f2fd..dd6765bc385e 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -84,8 +84,8 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, * @slot: the @gfn belongs to. * @gfn: the guest page. */ -void kvm_slot_page_track_add_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn) +void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn) { if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) return; @@ -101,12 +101,11 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) kvm_flush_remote_tlbs(kvm); } -EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page); +EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn); /* * remove the guest page from the tracking pool which stops the interception - * of corresponding access on that page. It is the opposed operation of - * kvm_slot_page_track_add_page(). + * of corresponding access on that page. * * It should be called under the protection both of mmu-lock and kvm->srcu * or kvm->slots_lock. @@ -115,8 +114,8 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page); * @slot: the @gfn belongs to. * @gfn: the guest page. */ -void kvm_slot_page_track_remove_page(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn) +void kvm_write_track_remove_gfn(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn) { if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) return; @@ -129,14 +128,13 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, */ kvm_mmu_gfn_allow_lpage(slot, gfn); } -EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); +EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn); /* * check if the corresponding access on the specified guest page is tracked. 
*/ -bool kvm_slot_page_track_is_active(struct kvm *kvm, - const struct kvm_memory_slot *slot, - gfn_t gfn) +bool kvm_gfn_is_write_tracked(struct kvm *kvm, + const struct kvm_memory_slot *slot, gfn_t gfn) { int index; diff --git a/arch/x86/kvm/mmu/page_track.h b/arch/x86/kvm/mmu/page_track.h index 48a5377395dc..b020f998ee2c 100644 --- a/arch/x86/kvm/mmu/page_track.h +++ b/arch/x86/kvm/mmu/page_track.h @@ -15,8 +15,8 @@ int kvm_page_track_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages); -bool kvm_slot_page_track_is_active(struct kvm *kvm, - const struct kvm_memory_slot *slot, gfn_t gfn); +bool kvm_gfn_is_write_tracked(struct kvm *kvm, + const struct kvm_memory_slot *slot, gfn_t gfn); #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING int kvm_page_track_init(struct kvm *kvm); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index e71182b8a3f2..05a7e614ead0 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1564,7 +1564,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) } write_lock(&kvm->mmu_lock); - kvm_slot_page_track_add_page(kvm, slot, gfn); + kvm_write_track_add_gfn(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); @@ -1593,7 +1593,7 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) } write_lock(&kvm->mmu_lock); - kvm_slot_page_track_remove_page(kvm, slot, gfn); + kvm_write_track_remove_gfn(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); -- cgit From e18c5429e0c4bf7b550211245dcbfc61e71a7e6f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:31 -0700 Subject: KVM: x86/mmu: Assert that correct locks are held for page write-tracking When adding/removing gfns to/from write-tracking, assert that mmu_lock is held for write, and that either slots_lock or kvm->srcu is held. mmu_lock must be held for write to protect gfn_write_track's refcount, and SRCU or slots_lock must be held to protect the memslot itself. Tested-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-26-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/page_track.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index dd6765bc385e..2c6b912072b7 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -12,6 +12,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include @@ -77,9 +78,6 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, * add guest page to the tracking pool so that corresponding access on that * page will be intercepted. * - * It should be called under the protection both of mmu-lock and kvm->srcu - * or kvm->slots_lock. - * * @kvm: the guest instance we are interested in. * @slot: the @gfn belongs to. * @gfn: the guest page. 
@@ -87,6 +85,11 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) { + lockdep_assert_held_write(&kvm->mmu_lock); + + lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || + srcu_read_lock_held(&kvm->srcu)); + if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) return; @@ -107,9 +110,6 @@ EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn); * remove the guest page from the tracking pool which stops the interception * of corresponding access on that page. * - * It should be called under the protection both of mmu-lock and kvm->srcu - * or kvm->slots_lock. - * * @kvm: the guest instance we are interested in. * @slot: the @gfn belongs to. * @gfn: the guest page. @@ -117,6 +117,11 @@ EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn); void kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) { + lockdep_assert_held_write(&kvm->mmu_lock); + + lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || + srcu_read_lock_held(&kvm->srcu)); + if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) return; -- cgit From 427c76aed29ec12a57f11546a5036df3cc52855e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:32 -0700 Subject: KVM: x86/mmu: Bug the VM if write-tracking is used but not enabled Bug the VM if something attempts to write-track a gfn, but write-tracking isn't enabled. The VM is doomed (and KVM has an egregious bug) if KVM or KVMGT wants to shadow guest page tables but can't because write-tracking isn't enabled. Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-27-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/page_track.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 2c6b912072b7..9bf01311ee9c 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -90,7 +90,7 @@ void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || srcu_read_lock_held(&kvm->srcu)); - if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) + if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm)) return; update_gfn_write_track(slot, gfn, 1); @@ -122,7 +122,7 @@ void kvm_write_track_remove_gfn(struct kvm *kvm, lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || srcu_read_lock_held(&kvm->srcu)); - if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm))) + if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm)) return; update_gfn_write_track(slot, gfn, -1); -- cgit From 96316a06700fc93140e7492c9e994045680f7272 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:33 -0700 Subject: KVM: x86/mmu: Drop @slot param from exported/external page-track APIs Refactor KVM's exported/external page-track, a.k.a. write-track, APIs to take only the gfn and do the required memslot lookup in KVM proper. Forcing users of the APIs to get the memslot unnecessarily bleeds KVM internals into KVMGT and complicates usage of the APIs. No functional change intended. 
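For orientation, the new exported wrapper boils down to the following pattern; this is a condensed sketch of the page_track.c hunk below, with the early-return unlocking folded into a single exit path, not the literal patch:

int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int idx, r = 0;

	idx = srcu_read_lock(&kvm->srcu);	/* SRCU protects the memslot lookup... */
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		r = -EINVAL;
	} else {
		write_lock(&kvm->mmu_lock);	/* ...mmu_lock protects the write-track counts. */
		__kvm_write_track_add_gfn(kvm, slot, gfn);
		write_unlock(&kvm->mmu_lock);
	}
	srcu_read_unlock(&kvm->srcu, idx);
	return r;
}
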
Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-28-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 7 +-- arch/x86/kvm/mmu/mmu.c | 4 +- arch/x86/kvm/mmu/page_track.c | 85 ++++++++++++++++++++++++++--------- arch/x86/kvm/mmu/page_track.h | 5 +++ drivers/gpu/drm/i915/gvt/kvmgt.c | 37 ++++----------- 5 files changed, 80 insertions(+), 58 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index f5c1db36cdb7..4afab697e21c 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -4,11 +4,6 @@ #include -void kvm_write_track_add_gfn(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn); -void kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn); - #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING /* * The notifier represented by @kvm_page_track_notifier_node is linked into @@ -55,6 +50,8 @@ kvm_page_track_register_notifier(struct kvm *kvm, void kvm_page_track_unregister_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n); +int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn); +int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn); #else /* * Allow defining a node in a structure even if page tracking is disabled, e.g. diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index cc9a1e627d7e..060849d6aadf 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -831,7 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) /* the non-leaf shadow pages are keeping readonly. */ if (sp->role.level > PG_LEVEL_4K) - return kvm_write_track_add_gfn(kvm, slot, gfn); + return __kvm_write_track_add_gfn(kvm, slot, gfn); kvm_mmu_gfn_disallow_lpage(slot, gfn); @@ -877,7 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); if (sp->role.level > PG_LEVEL_4K) - return kvm_write_track_remove_gfn(kvm, slot, gfn); + return __kvm_write_track_remove_gfn(kvm, slot, gfn); kvm_mmu_gfn_allow_lpage(slot, gfn); } diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 9bf01311ee9c..c9ab8b587d25 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -74,16 +74,8 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, slot->arch.gfn_write_track[index] += count; } -/* - * add guest page to the tracking pool so that corresponding access on that - * page will be intercepted. - * - * @kvm: the guest instance we are interested in. - * @slot: the @gfn belongs to. - * @gfn: the guest page. - */ -void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn) +void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn) { lockdep_assert_held_write(&kvm->mmu_lock); @@ -104,18 +96,9 @@ void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) kvm_flush_remote_tlbs(kvm); } -EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn); -/* - * remove the guest page from the tracking pool which stops the interception - * of corresponding access on that page. - * - * @kvm: the guest instance we are interested in. - * @slot: the @gfn belongs to. - * @gfn: the guest page. 
- */ -void kvm_write_track_remove_gfn(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn) +void __kvm_write_track_remove_gfn(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn) { lockdep_assert_held_write(&kvm->mmu_lock); @@ -133,7 +116,6 @@ void kvm_write_track_remove_gfn(struct kvm *kvm, */ kvm_mmu_gfn_allow_lpage(slot, gfn); } -EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn); /* * check if the corresponding access on the specified guest page is tracked. @@ -257,4 +239,63 @@ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot) srcu_read_unlock(&head->track_srcu, idx); } +/* + * add guest page to the tracking pool so that corresponding access on that + * page will be intercepted. + * + * @kvm: the guest instance we are interested in. + * @gfn: the guest page. + */ +int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn) +{ + struct kvm_memory_slot *slot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + + slot = gfn_to_memslot(kvm, gfn); + if (!slot) { + srcu_read_unlock(&kvm->srcu, idx); + return -EINVAL; + } + + write_lock(&kvm->mmu_lock); + __kvm_write_track_add_gfn(kvm, slot, gfn); + write_unlock(&kvm->mmu_lock); + + srcu_read_unlock(&kvm->srcu, idx); + + return 0; +} +EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn); + +/* + * remove the guest page from the tracking pool which stops the interception + * of corresponding access on that page. + * + * @kvm: the guest instance we are interested in. + * @gfn: the guest page. + */ +int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn) +{ + struct kvm_memory_slot *slot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + + slot = gfn_to_memslot(kvm, gfn); + if (!slot) { + srcu_read_unlock(&kvm->srcu, idx); + return -EINVAL; + } + + write_lock(&kvm->mmu_lock); + __kvm_write_track_remove_gfn(kvm, slot, gfn); + write_unlock(&kvm->mmu_lock); + + srcu_read_unlock(&kvm->srcu, idx); + + return 0; +} +EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn); #endif diff --git a/arch/x86/kvm/mmu/page_track.h b/arch/x86/kvm/mmu/page_track.h index b020f998ee2c..d4d72ed999b1 100644 --- a/arch/x86/kvm/mmu/page_track.h +++ b/arch/x86/kvm/mmu/page_track.h @@ -15,6 +15,11 @@ int kvm_page_track_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages); +void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn); +void __kvm_write_track_remove_gfn(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn); + bool kvm_gfn_is_write_tracked(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 05a7e614ead0..21342a93e418 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1546,9 +1546,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = { int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { - struct kvm *kvm = info->vfio_device.kvm; - struct kvm_memory_slot *slot; - int idx; + int r; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) return -ESRCH; @@ -1556,18 +1554,9 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) if (kvmgt_gfn_is_write_protected(info, gfn)) return 0; - idx = srcu_read_lock(&kvm->srcu); - slot = gfn_to_memslot(kvm, gfn); - if (!slot) { - srcu_read_unlock(&kvm->srcu, idx); - return -EINVAL; - } - - write_lock(&kvm->mmu_lock); - kvm_write_track_add_gfn(kvm, slot, gfn); - write_unlock(&kvm->mmu_lock); - - srcu_read_unlock(&kvm->srcu, idx); + r = kvm_write_track_add_gfn(info->vfio_device.kvm, 
gfn); + if (r) + return r; kvmgt_protect_table_add(info, gfn); return 0; @@ -1575,9 +1564,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) { - struct kvm *kvm = info->vfio_device.kvm; - struct kvm_memory_slot *slot; - int idx; + int r; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) return -ESRCH; @@ -1585,17 +1572,9 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) if (!kvmgt_gfn_is_write_protected(info, gfn)) return 0; - idx = srcu_read_lock(&kvm->srcu); - slot = gfn_to_memslot(kvm, gfn); - if (!slot) { - srcu_read_unlock(&kvm->srcu, idx); - return -EINVAL; - } - - write_lock(&kvm->mmu_lock); - kvm_write_track_remove_gfn(kvm, slot, gfn); - write_unlock(&kvm->mmu_lock); - srcu_read_unlock(&kvm->srcu, idx); + r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn); + if (r) + return r; kvmgt_protect_table_del(info, gfn); return 0; -- cgit From f22b1e8500b449fabc33ca271cd8e91749fa63b4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:34 -0700 Subject: KVM: x86/mmu: Handle KVM bookkeeping in page-track APIs, not callers Get/put references to KVM when a page-track notifier is (un)registered instead of relying on the caller to do so. Forcing the caller to do the bookkeeping is unnecessary and adds one more thing for users to get wrong, e.g. see commit 9ed1fdee9ee3 ("drm/i915/gvt: Get reference to KVM iff attachment to VM is successful"). Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Link: https://lore.kernel.org/r/20230729013535.1070024-29-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 11 +++++------ arch/x86/kvm/mmu/page_track.c | 18 ++++++++++++------ drivers/gpu/drm/i915/gvt/kvmgt.c | 17 +++++++---------- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 4afab697e21c..3d040741044b 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -44,12 +44,11 @@ struct kvm_page_track_notifier_node { struct kvm_page_track_notifier_node *node); }; -void -kvm_page_track_register_notifier(struct kvm *kvm, - struct kvm_page_track_notifier_node *n); -void -kvm_page_track_unregister_notifier(struct kvm *kvm, - struct kvm_page_track_notifier_node *n); +int kvm_page_track_register_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n); +void kvm_page_track_unregister_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n); + int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn); int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn); #else diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index c9ab8b587d25..c87da11f3a04 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -157,17 +157,22 @@ int kvm_page_track_init(struct kvm *kvm) * register the notifier so that event interception for the tracked guest * pages can be received. 
*/ -void -kvm_page_track_register_notifier(struct kvm *kvm, - struct kvm_page_track_notifier_node *n) +int kvm_page_track_register_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) { struct kvm_page_track_notifier_head *head; + if (!kvm || kvm->mm != current->mm) + return -ESRCH; + + kvm_get_kvm(kvm); + head = &kvm->arch.track_notifier_head; write_lock(&kvm->mmu_lock); hlist_add_head_rcu(&n->node, &head->track_notifier_list); write_unlock(&kvm->mmu_lock); + return 0; } EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); @@ -175,9 +180,8 @@ EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); * stop receiving the event interception. It is the opposed operation of * kvm_page_track_register_notifier(). */ -void -kvm_page_track_unregister_notifier(struct kvm *kvm, - struct kvm_page_track_notifier_node *n) +void kvm_page_track_unregister_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) { struct kvm_page_track_notifier_head *head; @@ -187,6 +191,8 @@ kvm_page_track_unregister_notifier(struct kvm *kvm, hlist_del_rcu(&n->node); write_unlock(&kvm->mmu_lock); synchronize_srcu(&head->track_srcu); + + kvm_put_kvm(kvm); } EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 21342a93e418..eb50997dd369 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -654,21 +654,19 @@ out: static int intel_vgpu_open_device(struct vfio_device *vfio_dev) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); - - if (!vgpu->vfio_device.kvm || - vgpu->vfio_device.kvm->mm != current->mm) { - gvt_vgpu_err("KVM is required to use Intel vGPU\n"); - return -ESRCH; - } + int ret; if (__kvmgt_vgpu_exist(vgpu)) return -EEXIST; vgpu->track_node.track_write = kvmgt_page_track_write; vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region; - kvm_get_kvm(vgpu->vfio_device.kvm); - kvm_page_track_register_notifier(vgpu->vfio_device.kvm, - &vgpu->track_node); + ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm, + &vgpu->track_node); + if (ret) { + gvt_vgpu_err("KVM is required to use Intel vGPU\n"); + return ret; + } set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status); @@ -703,7 +701,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev) kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm, &vgpu->track_node); - kvm_put_kvm(vgpu->vfio_device.kvm); kvmgt_protect_table_destroy(vgpu); gvt_cache_destroy(vgpu); -- cgit From 09c8726ffa4a8309af7653988694b6cae6abf723 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 18:35:35 -0700 Subject: drm/i915/gvt: Drop final dependencies on KVM internal details Open code gpa_to_gfn() in kvmgt_page_track_write() and drop KVMGT's dependency on kvm_host.h, i.e. include only kvm_page_track.h. KVMGT assumes "gfn == gpa >> PAGE_SHIFT" all over the place, including a few lines below in the same function with the same gpa, i.e. there's no reason to use KVM's helper for this one case. No functional change intended. 
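For reference, gpa_to_gfn() is nothing more than a page-size shift, so the open-coded form is exactly equivalent; the helper in linux/kvm_host.h reads:

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);	/* i.e. gpa >> 12 with 4KiB pages */
}
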
Reviewed-by: Yan Zhao Tested-by: Yongwei Ma Reviewed-by: Zhi Wang Link: https://lore.kernel.org/r/20230729013535.1070024-30-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/gvt.h | 3 ++- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 2d65800d8e93..53a0a42a50db 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -34,10 +34,11 @@ #define _GVT_H_ #include -#include #include #include +#include + #include "i915_drv.h" #include "intel_gvt.h" diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index eb50997dd369..aaed3969f204 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1585,7 +1585,7 @@ static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len, mutex_lock(&info->vgpu_lock); - if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) + if (kvmgt_gfn_is_write_protected(info, gpa >> PAGE_SHIFT)) intel_vgpu_page_track_handler(info, gpa, (void *)val, len); -- cgit From c5f2d5645f9b7c12c9546ced9ec1f1a558870747 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:51:56 -0700 Subject: KVM: x86/mmu: Add helper to convert root hpa to shadow page Add a dedicated helper for converting a root hpa to a shadow page in anticipation of using a "dummy" root to handle the scenario where KVM needs to load a valid shadow root (from hardware's perspective), but the guest doesn't have a visible root to shadow. Similar to PAE roots, the dummy root won't have an associated kvm_mmu_page and will need special handling when finding a shadow page given a root. Opportunistically retrieve the root shadow page in kvm_mmu_sync_roots() *after* verifying the root is unsync (the dummy root can never be unsync). Link: https://lore.kernel.org/r/20230729005200.1057358-2-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 28 +++++++++++++--------------- arch/x86/kvm/mmu/spte.h | 9 +++++++++ arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 060849d6aadf..61b67a7d61e0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3543,11 +3543,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, if (!VALID_PAGE(*root_hpa)) return; - /* - * The "root" may be a special root, e.g. a PAE entry, treat it as a - * SPTE to ensure any non-PA bits are dropped. 
- */ - sp = spte_to_child_sp(*root_hpa); + sp = root_to_sp(*root_hpa); if (WARN_ON_ONCE(!sp)) return; @@ -3593,7 +3589,7 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, &invalid_list); if (free_active_root) { - if (to_shadow_page(mmu->root.hpa)) { + if (root_to_sp(mmu->root.hpa)) { mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list); } else if (mmu->pae_root) { for (i = 0; i < 4; ++i) { @@ -3617,6 +3613,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu) { unsigned long roots_to_free = 0; + struct kvm_mmu_page *sp; hpa_t root_hpa; int i; @@ -3631,8 +3628,8 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu) if (!VALID_PAGE(root_hpa)) continue; - if (!to_shadow_page(root_hpa) || - to_shadow_page(root_hpa)->role.guest_mode) + sp = root_to_sp(root_hpa); + if (!sp || sp->role.guest_mode) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); } @@ -3987,7 +3984,7 @@ static bool is_unsync_root(hpa_t root) * requirement isn't satisfied. */ smp_rmb(); - sp = to_shadow_page(root); + sp = root_to_sp(root); /* * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the @@ -4017,11 +4014,12 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { hpa_t root = vcpu->arch.mmu->root.hpa; - sp = to_shadow_page(root); if (!is_unsync_root(root)) return; + sp = root_to_sp(root); + write_lock(&vcpu->kvm->mmu_lock); mmu_sync_children(vcpu, sp, true); write_unlock(&vcpu->kvm->mmu_lock); @@ -4351,7 +4349,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, static bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { - struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa); + struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); /* Special roots, e.g. pae_root, are not backed by shadow pages. */ if (sp && is_obsolete_sp(vcpu->kvm, sp)) @@ -4531,7 +4529,7 @@ static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, { return (role.direct || pgd == root->pgd) && VALID_PAGE(root->hpa) && - role.word == to_shadow_page(root->hpa)->role.word; + role.word == root_to_sp(root->hpa)->role.word; } /* @@ -4605,7 +4603,7 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu, * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs * later if necessary. */ - if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa)) + if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa)) kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT); if (VALID_PAGE(mmu->root.hpa)) @@ -4653,7 +4651,7 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd) */ if (!new_role.direct) __clear_sp_write_flooding_count( - to_shadow_page(vcpu->arch.mmu->root.hpa)); + root_to_sp(vcpu->arch.mmu->root.hpa)); } EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); @@ -5508,7 +5506,7 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa) * (c) KVM doesn't track previous roots for PAE paging, and the guest * is unlikely to zap an in-use PGD. 
*/ - sp = to_shadow_page(root_hpa); + sp = root_to_sp(root_hpa); return !sp || is_obsolete_sp(kvm, sp); } diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 83e6614f3720..29e0c1c8caa6 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -236,6 +236,15 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) return to_shadow_page(__pa(sptep)); } +static inline struct kvm_mmu_page *root_to_sp(hpa_t root) +{ + /* + * The "root" may be a special root, e.g. a PAE entry, treat it as a + * SPTE to ensure any non-PA bits are dropped. + */ + return spte_to_child_sp(root); +} + static inline bool is_mmio_spte(u64 spte) { return (spte & shadow_mmio_mask) == shadow_mmio_value && diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index fda32a72b406..6c63f2d1675f 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -689,7 +689,7 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, else #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \ - for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end) + for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end) /* * Yield if the MMU lock is contended or this thread needs to return control -- cgit From c30e000e690af74f61a161fa60be140f23948cb1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:51:57 -0700 Subject: KVM: x86/mmu: Harden new PGD against roots without shadow pages Harden kvm_mmu_new_pgd() against NULL pointer dereference bugs by sanity checking that the target root has an associated shadow page prior to dereferencing said shadow page. The code in question is guaranteed to only see roots with shadow pages as fast_pgd_switch() explicitly frees the current root if it doesn't have a shadow page, i.e. is a PAE root, and that in turn prevents valid roots from being cached, but that's all very subtle. Link: https://lore.kernel.org/r/20230729005200.1057358-3-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 61b67a7d61e0..de73d986a282 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4527,9 +4527,19 @@ static void nonpaging_init_context(struct kvm_mmu *context) static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, union kvm_mmu_page_role role) { - return (role.direct || pgd == root->pgd) && - VALID_PAGE(root->hpa) && - role.word == root_to_sp(root->hpa)->role.word; + struct kvm_mmu_page *sp; + + if (!VALID_PAGE(root->hpa)) + return false; + + if (!role.direct && pgd != root->pgd) + return false; + + sp = root_to_sp(root->hpa); + if (WARN_ON_ONCE(!sp)) + return false; + + return role.word == sp->role.word; } /* @@ -4649,9 +4659,12 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd) * If this is a direct root page, it doesn't have a write flooding * count. Otherwise, clear the write flooding count. 
*/ - if (!new_role.direct) - __clear_sp_write_flooding_count( - root_to_sp(vcpu->arch.mmu->root.hpa)); + if (!new_role.direct) { + struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); + + if (!WARN_ON_ONCE(!sp)) + __clear_sp_write_flooding_count(sp); + } } EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); -- cgit From 2c6d4c27b92d729a2831df2a873ba6b5f682f435 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:51:58 -0700 Subject: KVM: x86/mmu: Harden TDP MMU iteration against root w/o shadow page Explicitly check that tdp_iter_start() is handed a valid shadow page to harden KVM against bugs, e.g. if KVM calls into the TDP MMU with an invalid or shadow MMU root (which would be a fatal KVM bug), the shadow page pointer will be NULL. Opportunistically stop the TDP MMU iteration instead of continuing on with garbage if the incoming root is bogus. Attempting to walk a garbage root is more likely to cause major problems than doing nothing. Cc: Yu Zhang Link: https://lore.kernel.org/r/20230729005200.1057358-4-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_iter.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index 5bb09f8d9fc6..bd30ebfb2f2c 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -39,13 +39,14 @@ void tdp_iter_restart(struct tdp_iter *iter) void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root, int min_level, gfn_t next_last_level_gfn) { - int root_level = root->role.level; - - WARN_ON_ONCE(root_level < 1); - WARN_ON_ONCE(root_level > PT64_ROOT_MAX_LEVEL); + if (WARN_ON_ONCE(!root || (root->role.level < 1) || + (root->role.level > PT64_ROOT_MAX_LEVEL))) { + iter->valid = false; + return; + } iter->next_last_level_gfn = next_last_level_gfn; - iter->root_level = root_level; + iter->root_level = root->role.level; iter->min_level = min_level; iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root->spt; iter->as_id = kvm_mmu_page_as_id(root); -- cgit From b5b359ac30d458cb3e2a7fb953adeec78fae4e35 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:51:59 -0700 Subject: KVM: x86/mmu: Disallow guest from using !visible slots for page tables Explicitly inject a page fault if the guest attempts to use a !visible gfn as a page table. kvm_vcpu_gfn_to_hva_prot() will naturally handle the case where there is no memslot, but doesn't catch the scenario where the gfn points at a KVM-internal memslot. Letting the guest backdoor its way into accessing KVM-internal memslots isn't dangerous on its own, e.g. at worst the guest can crash itself, but disallowing the behavior will simplify fixing how KVM handles !visible guest root gfns (immediately synthesizing a triple fault when loading the root is architecturally wrong). 
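For reference, "visible" is KVM's standard notion of a memslot that userspace can see, i.e. NULL slots, KVM-internal slots, and invalid (being-deleted) slots all fail the check; at the time of this series the helper in include/linux/kvm_host.h reads roughly as:

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
	/* KVM-internal slots use ids at or above KVM_USER_MEM_SLOTS. */
	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
		!(memslot->flags & KVM_MEMSLOT_INVALID));
}
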
Link: https://lore.kernel.org/r/20230729005200.1057358-5-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/paging_tmpl.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index f8d358226ac6..89eb45eb9dd5 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -361,6 +361,7 @@ retry_walk: ++walker->level; do { + struct kvm_memory_slot *slot; unsigned long host_addr; pt_access = pte_access; @@ -391,7 +392,11 @@ retry_walk: if (unlikely(real_gpa == INVALID_GPA)) return 0; - host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa), + slot = kvm_vcpu_gfn_to_memslot(vcpu, gpa_to_gfn(real_gpa)); + if (!kvm_is_visible_memslot(slot)) + goto error; + + host_addr = gfn_to_hva_memslot_prot(slot, gpa_to_gfn(real_gpa), &walker->pte_writable[walker->level - 1]); if (unlikely(kvm_is_error_hva(host_addr))) goto error; -- cgit From 0e3223d8d00ac05849661f1b8b476d3caca251cf Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jul 2023 17:52:00 -0700 Subject: KVM: x86/mmu: Use dummy root, backed by zero page, for !visible guest roots When attempting to allocate a shadow root for a !visible guest root gfn, e.g. that resides in MMIO space, load a dummy root that is backed by the zero page instead of immediately synthesizing a triple fault shutdown (using the zero page ensures any attempt to translate memory will generate a !PRESENT fault and thus VM-Exit). Unless the vCPU is racing with memslot activity, KVM will inject a page fault due to not finding a visible slot in FNAME(walk_addr_generic), i.e. the end result is mostly the same, but critically KVM will inject a fault only *after* KVM runs the vCPU with the bogus root. Waiting to inject a fault until after running the vCPU fixes a bug where KVM would bail from nested VM-Enter if L1 tried to run L2 with TDP enabled and a !visible root. Even though a bad root will *probably* lead to shutdown, (a) it's not guaranteed and (b) the CPU won't read the underlying memory until after VM-Enter succeeds. E.g. if L1 runs L2 with a VMX preemption timer value of '0', then architecturally the preemption timer VM-Exit is guaranteed to occur before the CPU executes any instruction, i.e. before the CPU needs to translate a GPA to a HPA (so long as there are no injected events with higher priority than the preemption timer). If KVM manages to get to FNAME(fetch) with a dummy root, e.g. because userspace created a memslot between installing the dummy root and handling the page fault, simply unload the MMU to allocate a new root and retry the instruction. Use KVM_REQ_MMU_FREE_OBSOLETE_ROOTS to drop the root, as invoking kvm_mmu_free_roots() while holding mmu_lock would deadlock, and conceptually the dummy root has indeed become obsolete. The only difference versus existing usage of KVM_REQ_MMU_FREE_OBSOLETE_ROOTS is that the root has become obsolete due to memslot *creation*, not memslot deletion or movement. 
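The mechanism hinges on two tiny helpers, quoted here from the mmu_internal.h hunk below for orientation: the shared zero page is all zeroes, and an all-zero paging-structure entry is !PRESENT on x86, so hardware can be pointed at the dummy root but can never successfully translate through it:

static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;	/* hpa of the shared zero page */
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}
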
Reported-by: Reima Ishii
Cc: Yu Zhang
Link: https://lore.kernel.org/r/20230729005200.1057358-6-seanjc@google.com
Signed-off-by: Sean Christopherson
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c          | 47 ++++++++++++++++++++---------------------
 arch/x86/kvm/mmu/mmu_internal.h | 10 +++++++++
 arch/x86/kvm/mmu/paging_tmpl.h  | 11 ++++++++++
 arch/x86/kvm/mmu/spte.h         |  3 +++
 4 files changed, 47 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index de73d986a282..e1d011c67cc6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3589,7 +3589,9 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 					   &invalid_list);

 	if (free_active_root) {
-		if (root_to_sp(mmu->root.hpa)) {
+		if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
+			/* Nothing to cleanup for dummy roots. */
+		} else if (root_to_sp(mmu->root.hpa)) {
 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
 		} else if (mmu->pae_root) {
 			for (i = 0; i < 4; ++i) {
@@ -3637,19 +3639,6 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);

-
-static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
-{
-	int ret = 0;
-
-	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
-		ret = 1;
-	}
-
-	return ret;
-}
-
 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
 			    u8 level)
 {
@@ -3787,8 +3776,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
 	root_gfn = root_pgd >> PAGE_SHIFT;

-	if (mmu_check_root(vcpu, root_gfn))
-		return 1;
+	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
+		mmu->root.hpa = kvm_mmu_get_dummy_root();
+		return 0;
+	}

 	/*
 	 * On SVM, reading PDPTRs might access guest memory, which might fault
@@ -3800,8 +3791,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			if (!(pdptrs[i] & PT_PRESENT_MASK))
 				continue;

-			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
-				return 1;
+			if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
+				pdptrs[i] = 0;
 		}
 	}

@@ -3968,7 +3959,7 @@ static bool is_unsync_root(hpa_t root)
 {
 	struct kvm_mmu_page *sp;

-	if (!VALID_PAGE(root))
+	if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
 		return false;

 	/*
@@ -4374,6 +4365,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 {
 	int r;

+	/* Dummy roots are used only for shadowing bad guest roots. */
+	if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
+		return RET_PF_RETRY;
+
 	if (page_fault_handle_page_track(vcpu, fault))
 		return RET_PF_EMULATE;

@@ -4609,9 +4604,8 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
 			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
 {
 	/*
-	 * For now, limit the caching to 64-bit hosts+VMs in order to avoid
-	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
-	 * later if necessary.
+	 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
+	 * avoid having to deal with PDPTEs and other complexities.
 	 */
 	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
@@ -5510,14 +5504,19 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)

 	/*
 	 * When freeing obsolete roots, treat roots as obsolete if they don't
-	 * have an associated shadow page.  This does mean KVM will get false
+	 * have an associated shadow page, as it's impossible to determine if
+	 * such roots are fresh or stale.  This does mean KVM will get false
 	 * positives and free roots that don't strictly need to be freed, but
 	 * such false positives are relatively rare:
 	 *
-	 * (a) only PAE paging and nested NPT has roots without shadow pages
+	 * (a) only PAE paging and nested NPT have roots without shadow pages
+	 *     (or any shadow paging flavor with a dummy root, see note below)
 	 * (b) remote reloads due to a memslot update obsoletes _all_ roots
 	 * (c) KVM doesn't track previous roots for PAE paging, and the guest
 	 *     is unlikely to zap an in-use PGD.
+	 *
+	 * Note!  Dummy roots are unique in that they are obsoleted by memslot
+	 *        _creation_!  See also FNAME(fetch).
 	 */
 	sp = root_to_sp(root_hpa);
 	return !sp || is_obsolete_sp(kvm, sp);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 95846c40e79e..b102014e2c60 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -36,6 +36,16 @@
 #define INVALID_PAE_ROOT	0
 #define IS_VALID_PAE_ROOT(x)	(!!(x))

+static inline hpa_t kvm_mmu_get_dummy_root(void)
+{
+	return my_zero_pfn(0) << PAGE_SHIFT;
+}
+
+static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
+{
+	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
+}
+
 typedef u64 __rcu *tdp_ptep_t;

 struct kvm_mmu_page {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 89eb45eb9dd5..c85255073f67 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -651,6 +651,17 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
 		goto out_gpte_changed;

+	/*
+	 * Load a new root and retry the faulting instruction in the extremely
+	 * unlikely scenario that the guest root gfn became visible between
+	 * loading a dummy root and handling the resulting page fault, e.g. if
+	 * userspace created a memslot in the interim.
+	 */
+	if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
+		kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
+		goto out_gpte_changed;
+	}
+
 	for_each_shadow_entry(vcpu, fault->addr, it) {
 		gfn_t table_gfn;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 29e0c1c8caa6..d4e45de13da0 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -238,6 +238,9 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)

 static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
 {
+	if (kvm_mmu_is_dummy_root(root))
+		return NULL;
+
 	/*
 	 * The "root" may be a special root, e.g. a PAE entry, treat it as a
 	 * SPTE to ensure any non-PA bits are dropped.
-- cgit

From d10f3780bc2f80744d291e118c0c8bade54ed3b8 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Tue, 8 Aug 2023 15:40:59 -0700
Subject: KVM: x86/mmu: Include mmu.h in spte.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Explicitly include mmu.h in spte.h instead of relying on the "parent"
to include mmu.h.  spte.h references a variety of macros and variables
that are defined/declared in mmu.h, and so including spte.h before (or
instead of) mmu.h will result in build errors, e.g.
  arch/x86/kvm/mmu/spte.h: In function ‘is_mmio_spte’:
  arch/x86/kvm/mmu/spte.h:242:23: error: ‘enable_mmio_caching’ undeclared
    242 |                likely(enable_mmio_caching);
        |                       ^~~~~~~~~~~~~~~~~~~

  arch/x86/kvm/mmu/spte.h: In function ‘is_large_pte’:
  arch/x86/kvm/mmu/spte.h:302:22: error: ‘PT_PAGE_SIZE_MASK’ undeclared
    302 |         return pte & PT_PAGE_SIZE_MASK;
        |                      ^~~~~~~~~~~~~~~~~

  arch/x86/kvm/mmu/spte.h: In function ‘is_dirty_spte’:
  arch/x86/kvm/mmu/spte.h:332:56: error: ‘PT_WRITABLE_MASK’ undeclared
    332 |         return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
        |                                                        ^~~~~~~~~~~~~~~~

Fixes: 5a9624affe7c ("KVM: mmu: extract spte.h and spte.c")
Link: https://lore.kernel.org/r/20230808224059.2492476-1-seanjc@google.com
Signed-off-by: Sean Christopherson
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/spte.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index d4e45de13da0..a129951c9a88 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -3,6 +3,7 @@
 #ifndef KVM_X86_MMU_SPTE_H
 #define KVM_X86_MMU_SPTE_H

+#include "mmu.h"
 #include "mmu_internal.h"

 /*
-- cgit
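The rule the fix enforces, that a header includes whatever it
references so it compiles in any inclusion order, can be shown in
miniature with two hypothetical headers (demo names, not kernel files):

	/* mmu_demo.h -- owns the definition the other header depends on. */
	#ifndef MMU_DEMO_H
	#define MMU_DEMO_H
	#define DEMO_PAGE_SIZE_MASK (1ULL << 7)	/* x86 PTE PS bit, for illustration */
	#endif

	/* spte_demo.h -- self-sufficient: it includes mmu_demo.h itself
	 * rather than relying on every consumer to have included it first,
	 * so '#include "spte_demo.h"' alone always compiles.
	 */
	#ifndef SPTE_DEMO_H
	#define SPTE_DEMO_H
	#include "mmu_demo.h"

	static inline int demo_is_large_pte(unsigned long long pte)
	{
		return !!(pte & DEMO_PAGE_SIZE_MASK);
	}
	#endif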