diffstat
 arch/arm64/include/asm/kvm_host.h |   2 +
 arch/arm64/include/asm/sysreg.h   |   4 ++
 arch/arm64/kvm/arm.c              |  16 +++++
 arch/arm64/kvm/sys_regs.c         | 111 ++++++++++++++++++++++----------------
 4 files changed, 89 insertions(+), 44 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 781d029b8aa8..0cd9f0f75c13 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -118,6 +118,8 @@ struct kvm_arch {  	 */  	unsigned long *pmu_filter;  	unsigned int pmuver; + +	u8 pfr0_csv2;  };  struct kvm_vcpu_fault_info { diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 174817ba119c..e2ef4c2edf06 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -372,6 +372,8 @@  #define SYS_CONTEXTIDR_EL1		sys_reg(3, 0, 13, 0, 1)  #define SYS_TPIDR_EL1			sys_reg(3, 0, 13, 0, 4) +#define SYS_SCXTNUM_EL1			sys_reg(3, 0, 13, 0, 7) +  #define SYS_CNTKCTL_EL1			sys_reg(3, 0, 14, 1, 0)  #define SYS_CCSIDR_EL1			sys_reg(3, 1, 0, 0, 0) @@ -404,6 +406,8 @@  #define SYS_TPIDR_EL0			sys_reg(3, 3, 13, 0, 2)  #define SYS_TPIDRRO_EL0			sys_reg(3, 3, 13, 0, 3) +#define SYS_SCXTNUM_EL0			sys_reg(3, 3, 13, 0, 7) +  /* Definitions for system register interface to AMU for ARMv8.4 onwards */  #define SYS_AM_EL0(crm, op2)		sys_reg(3, 3, 13, (crm), (op2))  #define SYS_AMCR_EL0			SYS_AM_EL0(2, 0) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5750ec34960e..c0ffb019ca8b 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -102,6 +102,20 @@ static int kvm_arm_default_max_vcpus(void)  	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;  } +static void set_default_csv2(struct kvm *kvm) +{ +	/* +	 * The default is to expose CSV2 == 1 if the HW isn't affected. +	 * Although this is a per-CPU feature, we make it global because +	 * asymmetric systems are just a nuisance. +	 * +	 * Userspace can override this as long as it doesn't promise +	 * the impossible. 
+	 */ +	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) +		kvm->arch.pfr0_csv2 = 1; +} +  /**   * kvm_arch_init_vm - initializes a VM data structure   * @kvm:	pointer to the KVM struct @@ -127,6 +141,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)  	/* The maximum number of VCPUs is limited by the host's GIC model */  	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus(); +	set_default_csv2(kvm); +  	return ret;  out_free_stage2_pgd:  	kvm_free_stage2_pgd(&kvm->arch.mmu); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d0868d0e8ff4..c1fac9836af1 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1038,8 +1038,8 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,  	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\  	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } -static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p, -			     const struct sys_reg_desc *r) +static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p, +			 const struct sys_reg_desc *r)  {  	kvm_inject_undefined(vcpu); @@ -1047,24 +1047,10 @@ static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,  }  /* Macro to expand the AMU counter and type registers*/ -#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu } -#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), access_amu } -#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu } -#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), access_amu } - -static bool trap_ptrauth(struct kvm_vcpu *vcpu, -			 struct sys_reg_params *p, -			 const struct sys_reg_desc *rd) -{ -	/* -	 * If we land here, that is because we didn't fixup the access on exit -	 * by allowing the PtrAuth sysregs. The only way this happens is when -	 * the guest does not have PtrAuth support enabled. 
-	 */ -	kvm_inject_undefined(vcpu); - -	return false; -} +#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access } +#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access } +#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access } +#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }  static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,  			const struct sys_reg_desc *rd) @@ -1072,8 +1058,14 @@ static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,  	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;  } +/* + * If we land here on a PtrAuth access, that is because we didn't + * fixup the access on exit by allowing the PtrAuth sysregs. The only + * way this happens is when the guest does not have PtrAuth support + * enabled. + */  #define __PTRAUTH_KEY(k)						\ -	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,		\ +	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\  	.visibility = ptrauth_visibility}  #define PTRAUTH_KEY(k)							\ @@ -1128,9 +1120,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,  		if (!vcpu_has_sve(vcpu))  			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);  		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT); -		if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) && -		    arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) -			val |= (1UL << ID_AA64PFR0_CSV2_SHIFT); +		val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT); +		val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);  	} else if (id == SYS_ID_AA64PFR1_EL1) {  		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);  	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { @@ -1213,6 +1204,40 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,  	return REG_HIDDEN;  } +static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, +			       const struct sys_reg_desc *rd, +			       const struct kvm_one_reg *reg, void __user *uaddr) +{ +	const u64 id = 
sys_reg_to_index(rd); +	int err; +	u64 val; +	u8 csv2; + +	err = reg_from_user(&val, uaddr, id); +	if (err) +		return err; + +	/* +	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as +	 * it doesn't promise more than what is actually provided (the +	 * guest could otherwise be covered in ectoplasmic residue). +	 */ +	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT); +	if (csv2 > 1 || +	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED)) +		return -EINVAL; + +	/* We can only differ with CSV2, and anything else is an error */ +	val ^= read_id_reg(vcpu, rd, false); +	val &= ~(0xFUL << ID_AA64PFR0_CSV2_SHIFT); +	if (val) +		return -EINVAL; + +	vcpu->kvm->arch.pfr0_csv2 = csv2; + +	return 0; +} +  /*   * cpufeature ID register user accessors   * @@ -1341,13 +1366,6 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,  	return true;  } -static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, -			    const struct sys_reg_desc *r) -{ -	kvm_inject_undefined(vcpu); -	return false; -} -  /* sys_reg_desc initialiser for known cpufeature ID registers */  #define ID_SANITISED(name) {			\  	SYS_DESC(SYS_##name),			\ @@ -1472,7 +1490,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {  	/* AArch64 ID registers */  	/* CRm=4 */ -	ID_SANITISED(ID_AA64PFR0_EL1), +	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg, +	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },  	ID_SANITISED(ID_AA64PFR1_EL1),  	ID_UNALLOCATED(4,2),  	ID_UNALLOCATED(4,3), @@ -1515,8 +1534,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {  	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },  	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, -	{ SYS_DESC(SYS_RGSR_EL1), access_mte_regs }, -	{ SYS_DESC(SYS_GCR_EL1), access_mte_regs }, +	{ SYS_DESC(SYS_RGSR_EL1), undef_access }, +	{ SYS_DESC(SYS_GCR_EL1), undef_access },  	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, 
ZCR_EL1, 0, .visibility = sve_visibility },  	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, @@ -1542,8 +1561,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {  	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },  	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, -	{ SYS_DESC(SYS_TFSR_EL1), access_mte_regs }, -	{ SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs }, +	{ SYS_DESC(SYS_TFSR_EL1), undef_access }, +	{ SYS_DESC(SYS_TFSRE0_EL1), undef_access },  	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },  	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, @@ -1579,6 +1598,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {  	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },  	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, +	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access }, +  	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},  	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr }, @@ -1607,14 +1628,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {  	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },  	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, -	{ SYS_DESC(SYS_AMCR_EL0), access_amu }, -	{ SYS_DESC(SYS_AMCFGR_EL0), access_amu }, -	{ SYS_DESC(SYS_AMCGCR_EL0), access_amu }, -	{ SYS_DESC(SYS_AMUSERENR_EL0), access_amu }, -	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu }, -	{ SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu }, -	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu }, -	{ SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu }, +	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access }, + +	{ SYS_DESC(SYS_AMCR_EL0), undef_access }, +	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access }, +	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access }, +	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access }, +	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access }, +	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access }, +	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access }, +	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },  	
AMU_AMEVCNTR0_EL0(0),  	AMU_AMEVCNTR0_EL0(1),  	AMU_AMEVCNTR0_EL0(2),  | 
