author     Marc Zyngier <maz@kernel.org>    2020-10-27 22:23:28 +0000
committer  Marc Zyngier <maz@kernel.org>    2020-11-10 11:22:51 +0000
commit     4ff3fc316d78daa2ed6de2f13616fb33a2926d8e (patch)
tree       3a249f0bbbfcc7387a8b92df5016a7882c70ecb3
parent     ca4e514774930f30b66375a974b5edcbebaf0e7e (diff)
KVM: arm64: Move AArch32 exceptions over to AArch64 sysregs
The use of the AArch32-specific accessors has always been a bit annoying on 64bit, and it is time for a change.

Let's move the AArch32 exception injection over to the AArch64 encoding, which requires us to split the two halves of FAR_EL1 into DFAR and IFAR.

This enables us to drop the preempt_disable() games on VHE, and to kill the last user of the vcpu_cp15() macro.

Signed-off-by: Marc Zyngier <maz@kernel.org>
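For readers who do not have the AArch32-to-AArch64 register mapping in mind: architecturally, DFAR occupies FAR_EL1[31:0], IFAR occupies FAR_EL1[63:32], DFSR maps onto ESR_EL1 and IFSR onto IFSR32_EL2, which is what makes this conversion possible. The snippet below is a minimal standalone sketch of the FAR_EL1 split, written in plain C with hypothetical helper names rather than kernel code; the patch itself does the same thing with GENMASK() against the vCPU's copy of FAR_EL1.

/*
 * Illustrative sketch only (not kernel code): how the two AArch32 fault
 * address registers share FAR_EL1.
 *
 *   DFAR -> FAR_EL1[31:0]    (data abort address)
 *   IFAR -> FAR_EL1[63:32]   (prefetch abort address)
 */
#include <stdint.h>

/* Hypothetical helper: place a 32-bit DFAR value in the low half. */
static inline uint64_t far_el1_set_dfar(uint64_t far_el1, uint32_t addr)
{
        far_el1 &= 0xffffffff00000000ULL;        /* keep the IFAR half   */
        return far_el1 | addr;                   /* DFAR is bits [31:0]  */
}

/* Hypothetical helper: place a 32-bit IFAR value in the high half. */
static inline uint64_t far_el1_set_ifar(uint64_t far_el1, uint32_t addr)
{
        far_el1 &= 0x00000000ffffffffULL;        /* keep the DFAR half   */
        return far_el1 | ((uint64_t)addr << 32); /* IFAR is bits [63:32] */
}

The GENMASK(31, 0) / GENMASK(63, 32) updates in the new inject_abt32() below are exactly this operation, performed through vcpu_read_sys_reg()/vcpu_write_sys_reg(), which, per the commit message, is what lets the preempt_disable() games on VHE go away.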
-rw-r--r--   arch/arm64/include/asm/kvm_host.h |  1
-rw-r--r--   arch/arm64/kvm/inject_fault.c     | 62
2 files changed, 20 insertions, 43 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 803cb2427b54..c905c3fcaaa0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -562,7 +562,6 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 #define CPx_BIAS                IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
 
 #define vcpu_cp14(v,r)          ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
-#define vcpu_cp15(v,r)          ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 
 struct kvm_vm_stat {
         ulong remote_tlb_flush;
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index e2a2e48ca371..b47df73e98d7 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -69,26 +69,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 #define DFSR_FSC_EXTABT_LPAE    0x10
 #define DFSR_FSC_EXTABT_nLPAE   0x08
 #define DFSR_LPAE               BIT(9)
-
-static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-{
-        preempt_disable();
-        if (vcpu->arch.sysregs_loaded_on_cpu) {
-                kvm_arch_vcpu_put(vcpu);
-                return true;
-        }
-
-        preempt_enable();
-        return false;
-}
-
-static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-{
-        if (loaded) {
-                kvm_arch_vcpu_load(vcpu, smp_processor_id());
-                preempt_enable();
-        }
-}
+#define TTBCR_EAE               BIT(31)
 
 static void inject_undef32(struct kvm_vcpu *vcpu)
 {
@@ -100,39 +81,36 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
  * Modelled after TakeDataAbortException() and TakePrefetchAbortException
  * pseudocode.
  */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-                         unsigned long addr)
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 {
-        u32 *far, *fsr;
-        bool is_lpae;
-        bool loaded;
+        u64 far;
+        u32 fsr;
+
+        /* Give the guest an IMPLEMENTATION DEFINED exception */
+        if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
+                fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
+        } else {
+                /* no need to shuffle FS[4] into DFSR[10] as its 0 */
+                fsr = DFSR_FSC_EXTABT_nLPAE;
+        }
 
-        loaded = pre_fault_synchronize(vcpu);
+        far = vcpu_read_sys_reg(vcpu, FAR_EL1);
 
         if (is_pabt) {
                 vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
                                      KVM_ARM64_PENDING_EXCEPTION);
-                far = &vcpu_cp15(vcpu, c6_IFAR);
-                fsr = &vcpu_cp15(vcpu, c5_IFSR);
+                far &= GENMASK(31, 0);
+                far |= (u64)addr << 32;
+                vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
         } else { /* !iabt */
                 vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
                                      KVM_ARM64_PENDING_EXCEPTION);
-                far = &vcpu_cp15(vcpu, c6_DFAR);
-                fsr = &vcpu_cp15(vcpu, c5_DFSR);
-        }
-
-        *far = addr;
-
-        /* Give the guest an IMPLEMENTATION DEFINED exception */
-        is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-        if (is_lpae) {
-                *fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
-        } else {
-                /* no need to shuffle FS[4] into DFSR[10] as its 0 */
-                *fsr = DFSR_FSC_EXTABT_nLPAE;
+                far &= GENMASK(63, 32);
+                far |= addr;
+                vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
         }
 
-        post_fault_synchronize(vcpu, loaded);
+        vcpu_write_sys_reg(vcpu, far, FAR_EL1);
 }
 
 /**