author		Marc Zyngier <maz@kernel.org>		2020-10-27 22:23:28 +0000
committer	Marc Zyngier <maz@kernel.org>		2020-11-10 11:22:51 +0000
commit		4ff3fc316d78daa2ed6de2f13616fb33a2926d8e
tree		3a249f0bbbfcc7387a8b92df5016a7882c70ecb3	/arch/arm64/kvm/inject_fault.c
parent		ca4e514774930f30b66375a974b5edcbebaf0e7e
KVM: arm64: Move AArch32 exceptions over to AArch64 sysregs
The use of the AArch32-specific accessors has always been a bit annoying on 64bit, and it is time for a change.

Let's move the AArch32 exception injection over to the AArch64 encoding, which requires us to split the two halves of FAR_EL1 into DFAR and IFAR.

This enables us to drop the preempt_disable() games on VHE, and to kill the last user of the vcpu_cp15() macro.

Signed-off-by: Marc Zyngier <maz@kernel.org>
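The split the message refers to relies on the architectural mapping of the AArch32 fault-address registers onto FAR_EL1: DFAR occupies bits [31:0] and IFAR bits [63:32]. The following minimal user-space sketch mirrors the GENMASK() arithmetic the patch introduces; GENMASK64(), set_dfar() and set_ifar() are hypothetical helpers for illustration only, not kernel APIs.

/*
 * Standalone sketch of the FAR_EL1 packing done in inject_abt32():
 * AArch32 DFAR maps to FAR_EL1[31:0], IFAR to FAR_EL1[63:32], so a
 * single 64-bit register image carries both fault addresses.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Hypothetical stand-in for the kernel's GENMASK() on 64-bit values */
#define GENMASK64(h, l) \
	((~UINT64_C(0) << (l)) & (~UINT64_C(0) >> (63 - (h))))

/* Update the DFAR half (bits [31:0]), preserving the IFAR half */
static uint64_t set_dfar(uint64_t far, uint32_t addr)
{
	far &= GENMASK64(63, 32);
	return far | addr;
}

/* Update the IFAR half (bits [63:32]), preserving the DFAR half */
static uint64_t set_ifar(uint64_t far, uint32_t addr)
{
	far &= GENMASK64(31, 0);
	return far | ((uint64_t)addr << 32);
}

int main(void)
{
	uint64_t far = 0;

	far = set_dfar(far, 0x1000);	/* data abort address */
	far = set_ifar(far, 0x8000);	/* prefetch abort address */
	printf("FAR_EL1 = 0x%016" PRIx64 "\n", far);	/* 0x0000800000001000 */
	return 0;
}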
Diffstat (limited to 'arch/arm64/kvm/inject_fault.c')
-rw-r--r--	arch/arm64/kvm/inject_fault.c	62
1 file changed, 20 insertions(+), 42 deletions(-)
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index e2a2e48ca371..b47df73e98d7 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -69,26 +69,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 #define DFSR_FSC_EXTABT_LPAE	0x10
 #define DFSR_FSC_EXTABT_nLPAE	0x08
 #define DFSR_LPAE		BIT(9)
-
-static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-	if (vcpu->arch.sysregs_loaded_on_cpu) {
-		kvm_arch_vcpu_put(vcpu);
-		return true;
-	}
-
-	preempt_enable();
-	return false;
-}
-
-static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-{
-	if (loaded) {
-		kvm_arch_vcpu_load(vcpu, smp_processor_id());
-		preempt_enable();
-	}
-}
+#define TTBCR_EAE		BIT(31)
 
 static void inject_undef32(struct kvm_vcpu *vcpu)
 {
@@ -100,39 +81,36 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
  * Modelled after TakeDataAbortException() and TakePrefetchAbortException
  * pseudocode.
  */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-			 unsigned long addr)
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 {
-	u32 *far, *fsr;
-	bool is_lpae;
-	bool loaded;
+	u64 far;
+	u32 fsr;
+
+	/* Give the guest an IMPLEMENTATION DEFINED exception */
+	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
+		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
+	} else {
+		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
+		fsr = DFSR_FSC_EXTABT_nLPAE;
+	}
 
-	loaded = pre_fault_synchronize(vcpu);
+	far = vcpu_read_sys_reg(vcpu, FAR_EL1);
 
 	if (is_pabt) {
 		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
 				     KVM_ARM64_PENDING_EXCEPTION);
-		far = &vcpu_cp15(vcpu, c6_IFAR);
-		fsr = &vcpu_cp15(vcpu, c5_IFSR);
+		far &= GENMASK(31, 0);
+		far |= (u64)addr << 32;
+		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
 	} else { /* !iabt */
 		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
 				     KVM_ARM64_PENDING_EXCEPTION);
-		far = &vcpu_cp15(vcpu, c6_DFAR);
-		fsr = &vcpu_cp15(vcpu, c5_DFSR);
-	}
-
-	*far = addr;
-
-	/* Give the guest an IMPLEMENTATION DEFINED exception */
-	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-	if (is_lpae) {
-		*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
-	} else {
-		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
-		*fsr = DFSR_FSC_EXTABT_nLPAE;
+		far &= GENMASK(63, 32);
+		far |= addr;
+		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
 	}
 
-	post_fault_synchronize(vcpu, loaded);
+	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
 }
 
 /**
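The FSR format selection at the top of the new inject_abt32() can also be sketched in isolation: when TTBCR.EAE is set the guest is using the LPAE (long-descriptor) translation format, so the external-abort fault status is encoded differently. The DFSR_* and TTBCR_EAE values below are copied from the patch; pick_fsr() is a hypothetical helper for illustration, not kernel code.

/*
 * Standalone illustration of the IMPLEMENTATION DEFINED external-abort
 * FSR encoding chosen by inject_abt32(), keyed off TTBCR.EAE.
 */
#include <stdint.h>
#include <stdio.h>

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		(UINT32_C(1) << 9)
#define TTBCR_EAE		(UINT32_C(1) << 31)

/* Hypothetical helper: pick the FSR value matching the guest's format */
static uint32_t pick_fsr(uint32_t ttbcr)
{
	if (ttbcr & TTBCR_EAE)
		return DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	/* short-descriptor format; FS[4] is 0, so no shuffling needed */
	return DFSR_FSC_EXTABT_nLPAE;
}

int main(void)
{
	printf("LPAE fsr  = 0x%08x\n", pick_fsr(TTBCR_EAE));	/* 0x00000210 */
	printf("nLPAE fsr = 0x%08x\n", pick_fsr(0));		/* 0x00000008 */
	return 0;
}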