author		Thomas Gleixner <tglx@linutronix.de>	2019-11-15 10:30:50 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2019-11-15 10:30:50 +0100
commit		ac94be498f84f7327533b62faca4c3da64434904 (patch)
tree		63893f37afb67cd400bf60ec16a35440d16f2a90 /arch/arm64/kvm
parent		dce7cd62754b5d4a6e401b8b0769ec94cf971041 (diff)
parent		8c5bd25bf42effd194d4b0b43895c42b374e620b (diff)
Merge branch 'linus' into x86/hyperv
Pick up upstream fixes to avoid conflicts.
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--	arch/arm64/kvm/hyp/debug-sr.c	30
-rw-r--r--	arch/arm64/kvm/hyp/switch.c	88
-rw-r--r--	arch/arm64/kvm/hyp/tlb.c	50
-rw-r--r--	arch/arm64/kvm/regmap.c	5
-rw-r--r--	arch/arm64/kvm/sys_regs.c	36
-rw-r--r--	arch/arm64/kvm/va_layout.c	14
6 files changed, 169 insertions, 54 deletions
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 26781da3ad3e..0fc9872a1467 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -18,40 +18,70 @@
#define save_debug(ptr,reg,nr) \
switch (nr) { \
case 15: ptr[15] = read_debug(reg, 15); \
+ /* Fall through */ \
case 14: ptr[14] = read_debug(reg, 14); \
+ /* Fall through */ \
case 13: ptr[13] = read_debug(reg, 13); \
+ /* Fall through */ \
case 12: ptr[12] = read_debug(reg, 12); \
+ /* Fall through */ \
case 11: ptr[11] = read_debug(reg, 11); \
+ /* Fall through */ \
case 10: ptr[10] = read_debug(reg, 10); \
+ /* Fall through */ \
case 9: ptr[9] = read_debug(reg, 9); \
+ /* Fall through */ \
case 8: ptr[8] = read_debug(reg, 8); \
+ /* Fall through */ \
case 7: ptr[7] = read_debug(reg, 7); \
+ /* Fall through */ \
case 6: ptr[6] = read_debug(reg, 6); \
+ /* Fall through */ \
case 5: ptr[5] = read_debug(reg, 5); \
+ /* Fall through */ \
case 4: ptr[4] = read_debug(reg, 4); \
+ /* Fall through */ \
case 3: ptr[3] = read_debug(reg, 3); \
+ /* Fall through */ \
case 2: ptr[2] = read_debug(reg, 2); \
+ /* Fall through */ \
case 1: ptr[1] = read_debug(reg, 1); \
+ /* Fall through */ \
default: ptr[0] = read_debug(reg, 0); \
}
#define restore_debug(ptr,reg,nr) \
switch (nr) { \
case 15: write_debug(ptr[15], reg, 15); \
+ /* Fall through */ \
case 14: write_debug(ptr[14], reg, 14); \
+ /* Fall through */ \
case 13: write_debug(ptr[13], reg, 13); \
+ /* Fall through */ \
case 12: write_debug(ptr[12], reg, 12); \
+ /* Fall through */ \
case 11: write_debug(ptr[11], reg, 11); \
+ /* Fall through */ \
case 10: write_debug(ptr[10], reg, 10); \
+ /* Fall through */ \
case 9: write_debug(ptr[9], reg, 9); \
+ /* Fall through */ \
case 8: write_debug(ptr[8], reg, 8); \
+ /* Fall through */ \
case 7: write_debug(ptr[7], reg, 7); \
+ /* Fall through */ \
case 6: write_debug(ptr[6], reg, 6); \
+ /* Fall through */ \
case 5: write_debug(ptr[5], reg, 5); \
+ /* Fall through */ \
case 4: write_debug(ptr[4], reg, 4); \
+ /* Fall through */ \
case 3: write_debug(ptr[3], reg, 3); \
+ /* Fall through */ \
case 2: write_debug(ptr[2], reg, 2); \
+ /* Fall through */ \
case 1: write_debug(ptr[1], reg, 1); \
+ /* Fall through */ \
default: write_debug(ptr[0], reg, 0); \
}
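[Editor's note] The added /* Fall through */ comments document that these descending switches cascade on purpose — saving or restoring debug register N implies N-1 .. 0 — and they keep compilers that warn on implicit fallthrough (e.g. GCC's -Wimplicit-fallthrough) quiet. A minimal standalone sketch of the same pattern, with made-up names rather than kernel code:

/*
 * Illustrative only: asking for "nr" entries intentionally falls through
 * so that entries nr-1 .. 0 are handled as well.
 */
#include <stdio.h>

static void save_up_to(int *dst, const int *src, int nr)
{
	switch (nr) {
	case 3: dst[3] = src[3];
		/* Fall through */
	case 2: dst[2] = src[2];
		/* Fall through */
	case 1: dst[1] = src[1];
		/* Fall through */
	default: dst[0] = src[0];
	}
}

int main(void)
{
	int src[4] = { 10, 11, 12, 13 };
	int dst[4] = { 0 };

	save_up_to(dst, src, 2);
	printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);	/* 10 11 12 0 */
	return 0;
}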
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index adaf266d8de8..799e84a40335 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
u64 hcr = vcpu->arch.hcr_el2;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+ hcr |= HCR_TVM;
+
write_sysreg(hcr, hcr_el2);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
*/
- if (vcpu->arch.hcr_el2 & HCR_VSE)
- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+ if (vcpu->arch.hcr_el2 & HCR_VSE) {
+ vcpu->arch.hcr_el2 &= ~HCR_VSE;
+ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+ }
if (has_vhe())
deactivate_traps_vhe();
@@ -229,20 +234,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
}
}
-static bool __hyp_text __true_value(void)
-{
- return true;
-}
-
-static bool __hyp_text __false_value(void)
-{
- return false;
-}
-
-static hyp_alternate_select(__check_arm_834220,
- __false_value, __true_value,
- ARM64_WORKAROUND_834220);
-
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
u64 par, tmp;
@@ -264,7 +255,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
tmp = read_sysreg(par_el1);
write_sysreg(par, par_el1);
- if (unlikely(tmp & 1))
+ if (unlikely(tmp & SYS_PAR_EL1_F))
return false; /* Translation failed, back to guest */
/* Convert PAR to HPFAR format */
@@ -298,7 +289,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
* resolve the IPA using the AT instruction.
*/
if (!(esr & ESR_ELx_S1PTW) &&
- (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+ (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+ (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
if (!__translate_far_to_hpfar(far, &hpfar))
return false;
} else {
@@ -393,6 +385,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
return true;
}
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+ /*
+ * The normal sysreg handling code expects to see the traps,
+ * let's not do anything here.
+ */
+ if (vcpu->arch.hcr_el2 & HCR_TVM)
+ return false;
+
+ switch (sysreg) {
+ case SYS_SCTLR_EL1:
+ write_sysreg_el1(val, SYS_SCTLR);
+ break;
+ case SYS_TTBR0_EL1:
+ write_sysreg_el1(val, SYS_TTBR0);
+ break;
+ case SYS_TTBR1_EL1:
+ write_sysreg_el1(val, SYS_TTBR1);
+ break;
+ case SYS_TCR_EL1:
+ write_sysreg_el1(val, SYS_TCR);
+ break;
+ case SYS_ESR_EL1:
+ write_sysreg_el1(val, SYS_ESR);
+ break;
+ case SYS_FAR_EL1:
+ write_sysreg_el1(val, SYS_FAR);
+ break;
+ case SYS_AFSR0_EL1:
+ write_sysreg_el1(val, SYS_AFSR0);
+ break;
+ case SYS_AFSR1_EL1:
+ write_sysreg_el1(val, SYS_AFSR1);
+ break;
+ case SYS_MAIR_EL1:
+ write_sysreg_el1(val, SYS_MAIR);
+ break;
+ case SYS_AMAIR_EL1:
+ write_sysreg_el1(val, SYS_AMAIR);
+ break;
+ case SYS_CONTEXTIDR_EL1:
+ write_sysreg_el1(val, SYS_CONTEXTIDR);
+ break;
+ default:
+ return false;
+ }
+
+ __kvm_skip_instr(vcpu);
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -412,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+ handle_tx2_tvm(vcpu))
+ return true;
+
/*
* We trap the first access to the FP/SIMD to save the host context
* and restore the guest context lazily.
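[Editor's note] The HCR_VSE hunk above keeps the software copy of HCR_EL2 authoritative and re-imports only the one bit the hardware may clear behind KVM's back (VSE drops to 0 once the vSError is taken), instead of overwriting the whole copy from the live register. Reduced to plain C, the merge is a mask-and-or; a self-contained sketch, with the bit position chosen purely for illustration:

/*
 * Illustrative only, not kernel code: re-sync a single hardware-owned bit
 * into a software-maintained copy, preserving all software-owned bits.
 */
#include <stdint.h>
#include <stdio.h>

#define HCR_VSE	(UINT64_C(1) << 8)	/* illustrative bit position */

static uint64_t resync_vse(uint64_t sw_copy, uint64_t hw_value)
{
	if (sw_copy & HCR_VSE) {
		sw_copy &= ~HCR_VSE;		/* forget the stale pending state */
		sw_copy |= hw_value & HCR_VSE;	/* import only VSE from hardware */
	}
	return sw_copy;
}

int main(void)
{
	/* vSError was pending in the copy; hardware has since delivered it */
	uint64_t sw = HCR_VSE | 0x3f, hw = 0x3f;

	printf("%#llx\n", (unsigned long long)resync_vse(sw, hw));	/* 0x3f */
	return 0;
}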
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index d49a14497715..eb0efc5557f3 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
isb();
}
-static hyp_alternate_select(__tlb_switch_to_guest,
- __tlb_switch_to_guest_nvhe,
- __tlb_switch_to_guest_vhe,
- ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
+{
+ if (has_vhe())
+ __tlb_switch_to_guest_vhe(kvm, cxt);
+ else
+ __tlb_switch_to_guest_nvhe(kvm, cxt);
+}
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
@@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
write_sysreg(0, vttbr_el2);
}
-static hyp_alternate_select(__tlb_switch_to_host,
- __tlb_switch_to_host_nvhe,
- __tlb_switch_to_host_vhe,
- ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
+{
+ if (has_vhe())
+ __tlb_switch_to_host_vhe(kvm, cxt);
+ else
+ __tlb_switch_to_host_nvhe(kvm, cxt);
+}
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
@@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm, &cxt);
+ __tlb_switch_to_guest(kvm, &cxt);
/*
* We could do so much better if we had the VA as well.
@@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (!has_vhe() && icache_is_vpipt())
__flush_icache_all();
- __tlb_switch_to_host()(kvm, &cxt);
+ __tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm, &cxt);
+ __tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host()(kvm, &cxt);
+ __tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -180,19 +188,31 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest()(kvm, &cxt);
+ __tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalle1);
dsb(nsh);
isb();
- __tlb_switch_to_host()(kvm, &cxt);
+ __tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_flush_vm_context(void)
{
dsb(ishst);
__tlbi(alle1is);
- asm volatile("ic ialluis" : : );
+
+ /*
+ * VIPT and PIPT caches are not affected by VMID, so no maintenance
+ * is necessary across a VMID rollover.
+ *
+ * VPIPT caches constrain lookup and maintenance to the active VMID,
+ * so we need to invalidate lines with a stale VMID to avoid an ABA
+ * race after multiple rollovers.
+ *
+ */
+ if (icache_is_vpipt())
+ asm volatile("ic ialluis");
+
dsb(ish);
}
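[Editor's note] The tlb.c hunks above drop the hyp_alternate_select() indirection in favour of a plain runtime predicate: has_vhe() is cheap (it resolves through the CPU-capability machinery), so the VHE/non-VHE choice becomes an ordinary branch the compiler can inline at each call site. The shape of that replacement, reduced to a standalone sketch with placeholder names:

/*
 * Illustrative only: instead of selecting one of two function pointers up
 * front, call sites branch on a predicate.
 */
#include <stdbool.h>
#include <stdio.h>

static bool vhe_enabled;	/* stands in for the VHE CPU capability */

static void tlb_switch_to_guest_vhe(void)  { puts("VHE guest switch"); }
static void tlb_switch_to_guest_nvhe(void) { puts("nVHE guest switch"); }

static void tlb_switch_to_guest(void)
{
	if (vhe_enabled)
		tlb_switch_to_guest_vhe();
	else
		tlb_switch_to_guest_nvhe();
}

int main(void)
{
	vhe_enabled = false;
	tlb_switch_to_guest();		/* prints "nVHE guest switch" */
	return 0;
}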
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index 0d60e4f0af66..a900181e3867 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
switch (spsr_idx) {
case KVM_SPSR_SVC:
write_sysreg_el1(v, SYS_SPSR);
+ break;
case KVM_SPSR_ABT:
write_sysreg(v, spsr_abt);
+ break;
case KVM_SPSR_UND:
write_sysreg(v, spsr_und);
+ break;
case KVM_SPSR_IRQ:
write_sysreg(v, spsr_irq);
+ break;
case KVM_SPSR_FIQ:
write_sysreg(v, spsr_fiq);
+ break;
}
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f26e181d881c..46822afc57e0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -632,7 +632,9 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
*/
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
- __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+ if (!system_supports_32bit_el0())
+ val |= ARMV8_PMU_PMCR_LC;
+ __vcpu_sys_reg(vcpu, r->reg) = val;
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
+ if (!system_supports_32bit_el0())
+ val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
@@ -981,13 +985,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
- trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
+ trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
- trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
+ trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
- trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
+ trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
- trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
+ trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
@@ -1540,7 +1544,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
+ { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
@@ -2254,13 +2258,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *table, size_t num)
+ const struct sys_reg_desc *table, size_t num,
+ unsigned long *bmap)
{
unsigned long i;
for (i = 0; i < num; i++)
- if (table[i].reset)
+ if (table[i].reset) {
+ int reg = table[i].reg;
+
table[i].reset(vcpu, &table[i]);
+ if (reg > 0 && reg < NR_SYS_REGS)
+ set_bit(reg, bmap);
+ }
}
/**
@@ -2774,18 +2784,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
size_t num;
const struct sys_reg_desc *table;
-
- /* Catch someone adding a register without putting in reset entry. */
- memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+ DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
/* Generic chip reset first (so target could override). */
- reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
table = get_target_table(vcpu->arch.target, true, &num);
- reset_sys_reg_descs(vcpu, table, num);
+ reset_sys_reg_descs(vcpu, table, num, bmap);
for (num = 1; num < NR_SYS_REGS; num++) {
- if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+ if (WARN(!test_bit(num, bmap),
"Didn't reset __vcpu_sys_reg(%zi)\n", num))
break;
}
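[Editor's note] The sys_regs.c reset change stops poisoning the whole register file with 0x42 and instead records, in a bitmap, which register indices a reset handler actually wrote, then warns about any index that was never set. A reduced standalone sketch of that bookkeeping, using plain C bit operations in place of the kernel's DECLARE_BITMAP/set_bit/test_bit helpers:

/*
 * Illustrative only: track which register indices were reset and report
 * the ones that never were.
 */
#include <stdio.h>

#define NR_REGS 8

static unsigned long reset_seen;	/* one bit per register index */

static void mark_reset(int reg)
{
	if (reg > 0 && reg < NR_REGS)
		reset_seen |= 1UL << reg;
}

static void check_all_reset(void)
{
	for (int reg = 1; reg < NR_REGS; reg++)
		if (!(reset_seen & (1UL << reg)))
			fprintf(stderr, "Didn't reset reg %d\n", reg);
}

int main(void)
{
	mark_reset(1);
	mark_reset(2);
	check_all_reset();	/* reports regs 3..7 as never reset */
	return 0;
}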
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index acd8084f1f2c..2cf7d4b606c3 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -29,25 +29,25 @@ static void compute_layout(void)
int kva_msb;
/* Where is my RAM region? */
- hyp_va_msb = idmap_addr & BIT(VA_BITS - 1);
- hyp_va_msb ^= BIT(VA_BITS - 1);
+ hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
+ hyp_va_msb ^= BIT(vabits_actual - 1);
kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
(u64)(high_memory - 1));
- if (kva_msb == (VA_BITS - 1)) {
+ if (kva_msb == (vabits_actual - 1)) {
/*
* No space in the address, let's compute the mask so
- * that it covers (VA_BITS - 1) bits, and the region
+ * that it covers (vabits_actual - 1) bits, and the region
* bit. The tag stays set to zero.
*/
- va_mask = BIT(VA_BITS - 1) - 1;
+ va_mask = BIT(vabits_actual - 1) - 1;
va_mask |= hyp_va_msb;
} else {
/*
* We do have some free bits to insert a random tag.
* Hyp VAs are now created from kernel linear map VAs
- * using the following formula (with V == VA_BITS):
+ * using the following formula (with V == vabits_actual):
*
* 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
* ---------------------------------------------------------
@@ -55,7 +55,7 @@ static void compute_layout(void)
*/
tag_lsb = kva_msb;
va_mask = GENMASK_ULL(tag_lsb - 1, 0);
- tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
+ tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
tag_val |= hyp_va_msb;
tag_val >>= tag_lsb;
}