author    | Nicholas Piggin <npiggin@gmail.com>   | 2021-05-28 19:07:41 +1000
committer | Michael Ellerman <mpe@ellerman.id.au> | 2021-06-10 22:12:14 +1000
commit    | 2e1ae9cd56f8616a707185f3c6cb7ee2a20809e1 (patch)
tree      | 820ef3667a86af583ace51c09976380cf92dd23f /arch/powerpc/kvm/book3s_hv_p9_entry.c
parent    | 41f779917669fcc28a7f5646d1f7a85043c9d152 (diff)
KVM: PPC: Book3S HV: Implement radix prefetch workaround by disabling MMU
Rather than partitioning the guest PID space and flushing a rogue guest
PID to work around this problem, fix it by always disabling the MMU when
switching in or out of guest MMU context in HV mode.
This may be a bit less efficient, but it is a lot less complicated and
allows the P9 path to trivially implement the workaround too. Newer CPUs
are not subject to this issue.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-22-npiggin@gmail.com
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_p9_entry.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_p9_entry.c | 14
1 file changed, 11 insertions, 3 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 0b5bd00c9d0f..178f771e299c 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -218,6 +218,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	mtspr(SPRN_AMOR, ~0UL);
 
+	if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+		__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
 	switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
 
 	/*
@@ -226,7 +229,8 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	 */
 	mtspr(SPRN_HDEC, hdec);
 
-	__mtmsrd(0, 1); /* clear RI */
+	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+		__mtmsrd(0, 1); /* clear RI */
 
 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
@@ -341,8 +345,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	radix_clear_slb();
 
-	__mtmsrd(msr, 0);
-
 	accumulate_time(vcpu, &vcpu->arch.rm_exit);
 
 	/* Advance host PURR/SPURR by the amount used by guest */
@@ -408,6 +410,12 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	switch_mmu_to_host_radix(kvm, host_pidr);
 
+	/*
+	 * If we are in real mode, only switch MMU on after the MMU is
+	 * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
+	 */
+	__mtmsrd(msr, 0);
+
 	end_timing(vcpu);
 
 	return trap;
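For context, the ordering this patch enforces can be illustrated with a
small user-space sketch. This is not kernel code: the helpers and the
has_radix_prefetch_bug flag below are hypothetical stand-ins for the
real KVM HV routines (switch_mmu_to_guest_radix(),
switch_mmu_to_host_radix(), __mtmsrd()), used only to show the
entry/exit ordering on CPUs with CPU_FTR_P9_RADIX_PREFETCH_BUG.

/*
 * Hypothetical model of the ordering in the diff above; the helpers
 * are printf stubs, not the real kernel routines.
 */
#include <stdbool.h>
#include <stdio.h>

static bool has_radix_prefetch_bug = true;	/* assume an affected P9 core */

static void mmu_off(void)      { printf("MSR[IR|DR|RI] cleared: translation off\n"); }
static void mmu_on(void)       { printf("MSR restored: translation back on\n"); }
static void mmu_to_guest(void) { printf("MMU context switched to guest\n"); }
static void mmu_to_host(void)  { printf("MMU context switched back to host\n"); }
static void run_guest(void)    { printf("enter guest\n"); }

int main(void)
{
	/*
	 * Entry: on affected CPUs the MMU is disabled before the guest
	 * context is loaded, so the core cannot prefetch host
	 * translations under the guest context.
	 */
	if (has_radix_prefetch_bug)
		mmu_off();
	mmu_to_guest();

	run_guest();

	/*
	 * Exit: switch back to the host context first, and only then
	 * re-enable the MMU, for the same reason in reverse.
	 */
	mmu_to_host();
	mmu_on();

	return 0;
}

The point of the sketch is only the ordering: translation goes off
before the context switch on entry, and comes back on only after the
context switch on exit, which is why the unconditional __mtmsrd(msr, 0)
in the diff moves to after switch_mmu_to_host_radix().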