author		Michael Ellerman <mpe@ellerman.id.au>	2019-02-20 19:55:00 +1100
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-02-21 13:19:52 +1100
commit		d976f6807ea613c54fcb74bd7ae68a43fdd62e1f (patch)
tree		860545b539ccbd2990c46f8e84fb847c6c022a93 /arch/powerpc/kvm
parent		1c7fc5cbc33980acd13d668f1c8f0313d6ae9fd8 (diff)
KVM: PPC: Book3S HV: Context switch AMR on Power9
kvmhv_p9_guest_entry() implements a fast-path guest entry for Power9 when guest and host are both running with the Radix MMU.

Currently in that path we don't save the host AMR (Authority Mask Register) value, and we always restore 0 on return to the host. That is OK at the moment because the AMR is not used for storage keys with the Radix MMU.

However we plan to start using the AMR on Radix to prevent the kernel from reading/writing to userspace outside of copy_to/from_user(). In order to make that work we need to save/restore the AMR value.

We only restore the value if it is different from the guest value, which is already in the register when we exit to the host. This should mean we rarely need to actually restore the value when running a modern Linux as a guest, because it will be using the same value as us.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Tested-by: Russell Currey <ruscur@russell.cc>
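In outline, the patch follows the usual SPR context-switch pattern: snapshot the host value on entry, let the guest run (its value is still live in the register on exit), and write the host value back only when it differs. A minimal sketch of that pattern, assuming the kernel's mfspr()/mtspr() accessors and the existing vcpu->arch.amr field; the exit-path store of vcpu->arch.amr is already present in kvmhv_p9_guest_entry() and is shown here only for context:

	/* Entry: snapshot the host AMR before switching to the guest. */
	unsigned long host_amr = mfspr(SPRN_AMR);

	/* ... switch SPRs to guest values, run the guest ... */

	/* Exit: the guest's AMR is still live in the register; save it. */
	vcpu->arch.amr = mfspr(SPRN_AMR);

	/*
	 * Restore only on mismatch. A modern Linux guest normally runs
	 * with the same AMR value as the host, so the mtspr is usually
	 * skipped on this fast path.
	 */
	if (host_amr != vcpu->arch.amr)
		mtspr(SPRN_AMR, host_amr);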
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 5a066fc299e1..105a3f78a760 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3455,6 +3455,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	unsigned long host_dscr = mfspr(SPRN_DSCR);
 	unsigned long host_tidr = mfspr(SPRN_TIDR);
 	unsigned long host_iamr = mfspr(SPRN_IAMR);
+	unsigned long host_amr = mfspr(SPRN_AMR);
 	s64 dec;
 	u64 tb;
 	int trap, save_pmu;
@@ -3571,13 +3572,15 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	mtspr(SPRN_PSPB, 0);
 	mtspr(SPRN_WORT, 0);
-	mtspr(SPRN_AMR, 0);
 	mtspr(SPRN_UAMOR, 0);
 	mtspr(SPRN_DSCR, host_dscr);
 	mtspr(SPRN_TIDR, host_tidr);
 	mtspr(SPRN_IAMR, host_iamr);
 	mtspr(SPRN_PSPB, 0);
 
+	if (host_amr != vcpu->arch.amr)
+		mtspr(SPRN_AMR, host_amr);
+
 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
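For background on the motivation mentioned in the commit message: the planned Radix use of the AMR (what later landed as Kernel Userspace Access Prevention) keeps the kernel running with AMR bits set that block loads and stores to user addresses, opening the window only around the explicit user accessors. A loose sketch of that idea, with mask values and helper names that are illustrative here rather than taken from this patch:

	/* Illustrative only: AMR-based blocking of kernel access to userspace. */
	#define AMR_BLOCK_READ	0x4000000000000000UL	/* assumed bit layout */
	#define AMR_BLOCK_WRITE	0x8000000000000000UL
	#define AMR_BLOCKED	(AMR_BLOCK_READ | AMR_BLOCK_WRITE)

	static inline void allow_user_access(void)
	{
		mtspr(SPRN_AMR, 0);		/* opened around copy_to/from_user() */
	}

	static inline void prevent_user_access(void)
	{
		mtspr(SPRN_AMR, AMR_BLOCKED);	/* normal kernel state */
	}

Under such a scheme, the old behaviour of unconditionally writing 0 to the AMR on guest exit would leave user access open after every exit from a guest, which is why the host value has to be saved and restored instead.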