Diffstat (limited to 'arch/arm64')
 arch/arm64/include/asm/arch_timer.h  |  8
 arch/arm64/include/asm/kvm_asm.h     |  2
 arch/arm64/include/asm/kvm_emulate.h |  5
 arch/arm64/include/asm/kvm_hyp.h     |  4
 arch/arm64/include/asm/timex.h       |  2
 arch/arm64/include/uapi/asm/kvm.h    |  7
 arch/arm64/kvm/hyp/switch.c          |  6
 arch/arm64/kvm/inject_fault.c        | 88
 arch/arm64/kvm/sys_regs.c            | 41
 9 files changed, 39 insertions(+), 124 deletions(-)
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index bdedd8f748d1..f2a234d6516c 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -52,6 +52,7 @@ struct arch_timer_erratum_workaround {
const char *desc;
u32 (*read_cntp_tval_el0)(void);
u32 (*read_cntv_tval_el0)(void);
+ u64 (*read_cntpct_el0)(void);
u64 (*read_cntvct_el0)(void);
int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
@@ -149,11 +150,8 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
static inline u64 arch_counter_get_cntpct(void)
{
- /*
- * AArch64 kernel and user space mandate the use of CNTVCT.
- */
- BUG();
- return 0;
+ isb();
+ return arch_timer_reg_read_stable(cntpct_el0);
}
static inline u64 arch_counter_get_cntvct(void)
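
With the new read_cntpct_el0 hook, an erratum workaround entry can override the physical counter read the same way existing entries already override CNTVCT. A minimal sketch of such an entry follows; only struct arch_timer_erratum_workaround and its .desc/.read_cntpct_el0 fields come from this diff, while the entry itself, its name, and the double-read retry policy are illustrative assumptions.

static u64 example_read_cntpct_el0(void)
{
        u64 first, second;

        /* Illustrative retry policy: re-read once and prefer the second
         * value if the high words disagree. A real workaround would use
         * whatever sequence its erratum actually requires. */
        first = read_sysreg(cntpct_el0);
        second = read_sysreg(cntpct_el0);

        return ((first ^ second) >> 32) ? second : first;
}

static const struct arch_timer_erratum_workaround example_wa = {
        .desc            = "example counter erratum",
        .read_cntpct_el0 = example_read_cntpct_el0,
};
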
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 26a64d0f9ab9..ab4d0a926043 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -55,6 +55,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
+
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
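
The new __kvm_timer_set_cntvoff() helper takes the 64-bit CNTVOFF as two 32-bit halves, presumably so the same prototype also suits the 32-bit host's hyp-call argument convention. A hedged sketch of how a caller might split the offset; the wrapper name is hypothetical, only __kvm_timer_set_cntvoff() comes from this diff.

static void example_set_cntvoff(u64 cntvoff)
{
        u32 low  = lower_32_bits(cntvoff);
        u32 high = upper_32_bits(cntvoff);

        /* Pass the offset as two 32-bit arguments to the hyp helper. */
        kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}
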
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index e5df3fce0008..5f28dfa14cee 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_undef32(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
@@ -237,7 +240,7 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
- switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+ switch (kvm_vcpu_trap_get_fault(vcpu)) {
case FSC_SEA:
case FSC_SEA_TTW0:
case FSC_SEA_TTW1:
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 4572a9b560fa..08d3bb66c8b7 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -129,8 +129,8 @@ void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
+void __timer_enable_traps(struct kvm_vcpu *vcpu);
+void __timer_disable_traps(struct kvm_vcpu *vcpu);
void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h
index 81a076eb37fa..9ad60bae5c8d 100644
--- a/arch/arm64/include/asm/timex.h
+++ b/arch/arm64/include/asm/timex.h
@@ -22,7 +22,7 @@
* Use the current timer as a cycle counter since this is what we use for
* the delay loop.
*/
-#define get_cycles() arch_counter_get_cntvct()
+#define get_cycles() arch_timer_read_counter()
#include <asm-generic/timex.h>
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 51149ec75fe4..9abbf3044654 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
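
The new KVM_REG_ARM_PTIMER_* encodings expose the emulated EL0 physical timer through the existing KVM one-reg interface. A minimal userspace sketch reading the guest's physical counter; it assumes a vcpu file descriptor obtained elsewhere and elides error handling.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest view of CNTPCT via KVM_GET_ONE_REG.
 * vcpu_fd is assumed to be an already-created KVM vcpu fd. */
static uint64_t read_guest_ptimer_cnt(int vcpu_fd)
{
        uint64_t cnt = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_PTIMER_CNT,
                .addr = (uint64_t)(unsigned long)&cnt,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                return 0;       /* error handling elided */

        return cnt;
}
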
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 951f3ebaff26..525c01f48867 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -304,7 +304,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__activate_vm(vcpu);
__vgic_restore_state(vcpu);
- __timer_restore_state(vcpu);
+ __timer_enable_traps(vcpu);
/*
* We must restore the 32-bit state before the sysregs, thanks
@@ -374,7 +374,7 @@ again:
__sysreg_save_guest_state(guest_ctxt);
__sysreg32_save_state(vcpu);
- __timer_save_state(vcpu);
+ __timer_disable_traps(vcpu);
__vgic_save_state(vcpu);
__deactivate_traps(vcpu);
@@ -442,7 +442,7 @@ void __hyp_text __noreturn __hyp_panic(void)
vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
- __timer_save_state(vcpu);
+ __timer_disable_traps(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__sysreg_restore_host_state(host_ctxt);
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 3556715a774e..8ecbcb40e317 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,88 +33,6 @@
#define LOWER_EL_AArch64_VECTOR 0x400
#define LOWER_EL_AArch32_VECTOR 0x600
-/*
- * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
- */
-static const u8 return_offsets[8][2] = {
- [0] = { 0, 0 }, /* Reset, unused */
- [1] = { 4, 2 }, /* Undefined */
- [2] = { 0, 0 }, /* SVC, unused */
- [3] = { 4, 4 }, /* Prefetch abort */
- [4] = { 8, 8 }, /* Data abort */
- [5] = { 0, 0 }, /* HVC, unused */
- [6] = { 4, 4 }, /* IRQ, unused */
- [7] = { 4, 4 }, /* FIQ, unused */
-};
-
-static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
-{
- unsigned long cpsr;
- unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
- bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
- u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
- u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-
- cpsr = mode | COMPAT_PSR_I_BIT;
-
- if (sctlr & (1 << 30))
- cpsr |= COMPAT_PSR_T_BIT;
- if (sctlr & (1 << 25))
- cpsr |= COMPAT_PSR_E_BIT;
-
- *vcpu_cpsr(vcpu) = cpsr;
-
- /* Note: These now point to the banked copies */
- *vcpu_spsr(vcpu) = new_spsr_value;
- *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
-
- /* Branch to exception vector */
- if (sctlr & (1 << 13))
- vect_offset += 0xffff0000;
- else /* always have security exceptions */
- vect_offset += vcpu_cp15(vcpu, c12_VBAR);
-
- *vcpu_pc(vcpu) = vect_offset;
-}
-
-static void inject_undef32(struct kvm_vcpu *vcpu)
-{
- prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
-}
-
-/*
- * Modelled after TakeDataAbortException() and TakePrefetchAbortException
- * pseudocode.
- */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
- unsigned long addr)
-{
- u32 vect_offset;
- u32 *far, *fsr;
- bool is_lpae;
-
- if (is_pabt) {
- vect_offset = 12;
- far = &vcpu_cp15(vcpu, c6_IFAR);
- fsr = &vcpu_cp15(vcpu, c5_IFSR);
- } else { /* !iabt */
- vect_offset = 16;
- far = &vcpu_cp15(vcpu, c6_DFAR);
- fsr = &vcpu_cp15(vcpu, c5_DFSR);
- }
-
- prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
-
- *far = addr;
-
- /* Give the guest an IMPLEMENTATION DEFINED exception */
- is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
- if (is_lpae)
- *fsr = 1 << 9 | 0x34;
- else
- *fsr = 0x14;
-}
-
enum exception_type {
except_type_sync = 0,
except_type_irq = 0x80,
@@ -211,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
- inject_abt32(vcpu, false, addr);
+ kvm_inject_dabt32(vcpu, addr);
else
inject_abt64(vcpu, false, addr);
}
@@ -227,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
- inject_abt32(vcpu, true, addr);
+ kvm_inject_pabt32(vcpu, addr);
else
inject_abt64(vcpu, true, addr);
}
@@ -241,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
- inject_undef32(vcpu);
+ kvm_inject_undef32(vcpu);
else
inject_undef64(vcpu);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a0ee9b05e3d4..1830ebc227d1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -842,13 +842,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
u64 now = kvm_phys_timer_read();
+ u64 cval;
- if (p->is_write)
- ptimer->cnt_cval = p->regval + now;
- else
- p->regval = ptimer->cnt_cval - now;
+ if (p->is_write) {
+ kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
+ p->regval + now);
+ } else {
+ cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+ p->regval = cval - now;
+ }
return true;
}
@@ -857,24 +860,10 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-
- if (p->is_write) {
- /* ISTATUS bit is read-only */
- ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
- } else {
- u64 now = kvm_phys_timer_read();
-
- p->regval = ptimer->cnt_ctl;
- /*
- * Set ISTATUS bit if it's expired.
- * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
- * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
- * regardless of ENABLE bit for our implementation convenience.
- */
- if (ptimer->cnt_cval <= now)
- p->regval |= ARCH_TIMER_CTRL_IT_STAT;
- }
+ if (p->is_write)
+ kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
+ else
+ p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
return true;
}
@@ -883,12 +872,10 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-
if (p->is_write)
- ptimer->cnt_cval = p->regval;
+ kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
else
- p->regval = ptimer->cnt_cval;
+ p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
return true;
}
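
The sys_reg handlers above now go through kvm_arm_timer_get_reg()/kvm_arm_timer_set_reg() instead of poking the ptimer context directly. An illustrative sketch of how the get side might dispatch the new physical-timer IDs; the real implementation lives outside arch/arm64, also folds ISTATUS into the returned CTL value when CVAL has expired, and the function name here is hypothetical.

static u64 example_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_PTIMER_CTL:
                return ptimer->cnt_ctl;   /* plus ISTATUS in the real code */
        case KVM_REG_ARM_PTIMER_CVAL:
                return ptimer->cnt_cval;
        case KVM_REG_ARM_PTIMER_CNT:
                return kvm_phys_timer_read();
        default:
                return 0;
        }
}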