From 8a7e75d47b68193339f8727cf4503271d0a0b1d0 Mon Sep 17 00:00:00 2001
From: Suraj Jitindar Singh
Date: Tue, 2 Aug 2016 14:03:22 +1000
Subject: KVM: Add provisioning for ulong vm stats and u64 vcpu stats

vms and vcpus have statistics associated with them which can be viewed
within the debugfs. Currently it is assumed within the vcpu_stat_get() and
vm_stat_get() functions that all of these statistics are represented as
u32s, however the next patch adds some u64 vcpu statistics.

Change all vcpu statistics to u64 and modify vcpu_stat_get() accordingly.
Since vcpu statistics are per vcpu, they will only be updated by a single
vcpu at a time so this shouldn't present a problem on 32-bit machines
which can't atomically increment 64-bit numbers. However vm statistics
could potentially be updated by multiple vcpus from that vm at a time.
To avoid the overhead of atomics make all vm statistics ulong such that
they are 64-bit on 64-bit systems where they can be atomically incremented
and are 32-bit on 32-bit systems which may not be able to atomically
increment 64-bit numbers. Modify vm_stat_get() to expect ulongs.

Signed-off-by: Suraj Jitindar Singh
Reviewed-by: David Matlack
Acked-by: Christian Borntraeger
Signed-off-by: Paul Mackerras
---
 arch/arm/include/asm/kvm_host.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index de338d93d11b..6ad21f04a922 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -183,15 +183,15 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_vm_stat {
-	u32 remote_tlb_flush;
+	ulong remote_tlb_flush;
 };
 
 struct kvm_vcpu_stat {
-	u32 halt_successful_poll;
-	u32 halt_attempted_poll;
-	u32 halt_poll_invalid;
-	u32 halt_wakeup;
-	u32 hvc_exit_stat;
+	u64 halt_successful_poll;
+	u64 halt_attempted_poll;
+	u64 halt_poll_invalid;
+	u64 halt_wakeup;
+	u64 hvc_exit_stat;
 	u64 wfe_exit_stat;
 	u64 wfi_exit_stat;
 	u64 mmio_exit_user;
--
cgit

From dcadda146f4fd25a732382747f306465d337cda6 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 30 Aug 2016 17:05:55 +0100
Subject: arm/kvm: excise redundant cache maintenance

When modifying Stage-2 page tables, we perform cache maintenance to
account for non-coherent page table walks. However, this is unnecessary,
as page table walks are guaranteed to be coherent in the presence of the
virtualization extensions.

Per ARM DDI 0406C.c, section B1.7 ("The Virtualization Extensions"), the
virtualization extensions mandate the multiprocessing extensions.

Per ARM DDI 0406C.c, section B3.10.1 ("General TLB maintenance
requirements"), as described in the sub-section titled "TLB maintenance
operations and the memory order model", this maintenance is not required
in the presence of the multiprocessing extensions.

Hence, we need not perform this cache maintenance when modifying Stage-2
entries.

This patch removes the logic for performing the redundant maintenance.
To ensure visibility and ordering of updates, a dsb(ishst) that was
otherwise implicit in the maintenance is folded into kvm_set_pmd() and
kvm_set_pte().
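The setters therefore reduce to a plain store followed by a publishing
barrier. A minimal sketch of the resulting pattern (illustrative only,
not the exact kernel code; dsb(ishst) orders prior stores against other
observers in the Inner Shareable domain, including the table walker):

    /* Sketch only: publish a page-table entry update to the walker. */
    static inline void set_and_publish(unsigned long *entry, unsigned long new)
    {
    	*entry = new;				/* update the Stage-2 entry */
    	asm volatile("dsb ishst" ::: "memory");	/* order the store before any
    						 * following TLB maintenance */
    }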
Signed-off-by: Mark Rutland
Cc: Christoffer Dall
Cc: Marc Zyngier
Cc: kvmarm@lists.cs.columbia.edu
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_mmu.h | 28 ++--------------------------
 arch/arm/kvm/mmu.c             |  2 --
 2 files changed, 2 insertions(+), 28 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3bb803d6814b..74a44727f8e1 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -63,37 +63,13 @@ void kvm_clear_hyp_idmap(void);
 static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
 {
 	*pmd = new_pmd;
-	flush_pmd_entry(pmd);
+	dsb(ishst);
 }
 
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
 	*pte = new_pte;
-	/*
-	 * flush_pmd_entry just takes a void pointer and cleans the necessary
-	 * cache entries, so we can reuse the function for ptes.
-	 */
-	flush_pmd_entry(pte);
-}
-
-static inline void kvm_clean_pgd(pgd_t *pgd)
-{
-	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-}
-
-static inline void kvm_clean_pmd(pmd_t *pmd)
-{
-	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
-}
-
-static inline void kvm_clean_pmd_entry(pmd_t *pmd)
-{
-	clean_pmd_entry(pmd);
-}
-
-static inline void kvm_clean_pte(pte_t *pte)
-{
-	clean_pte_table(pte);
+	dsb(ishst);
 }
 
 static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 29d0b23af2a9..344755dcc3b2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -744,7 +744,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (!pgd)
 		return -ENOMEM;
 
-	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
 }
@@ -936,7 +935,6 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pte = mmu_memory_cache_alloc(cache);
-		kvm_clean_pte(pte);
 		pmd_populate_kernel(NULL, pmd, pte);
 		get_page(virt_to_page(pmd));
 	}
--
cgit

From cf0ba18a441410aeaf3ef97eead3a3fa5381f46f Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Thu, 1 Sep 2016 13:16:03 +0200
Subject: KVM: arm/arm64: Get rid of exported aliases to static functions

When rewriting the assembly code to C code, it was useful to have
exported aliases of static functions so that we could keep the existing
common C code unmodified and at the same time rewrite arm64 from
assembly to C code, and later do the arm part. Now that both are done,
we really don't need this level of indirection anymore, and it's time
to save a few lines and brain cells.
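The indirection being removed relies on GCC's alias attribute; a minimal
sketch of the pattern (generic names, not the kernel's) showing how a
static function gains an exported second name:

    /* Sketch only: __alias makes two symbols resolve to one definition. */
    #define __alias(symbol) __attribute__((alias(#symbol)))

    static int do_work(int x)
    {
    	return x + 1;
    }

    /* exported_do_work is simply another name for do_work */
    __alias(do_work) int exported_do_work(int x);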
Acked-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/hyp/switch.c |  4 +---
 arch/arm/kvm/hyp/tlb.c    | 15 ++++-----------
 2 files changed, 5 insertions(+), 14 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index b13caa90cd44..37b336549700 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -134,7 +134,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
@@ -191,8 +191,6 @@ again:
 	return exit_code;
 }
 
-__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-
 static const char * const __hyp_panic_string[] = {
 	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
 	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index a2636001e616..729652854f90 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -34,7 +34,7 @@
  * As v7 does not support flushing per IPA, just nuke the whole TLB
  * instead, ignoring the ipa value.
  */
-static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 {
 	dsb(ishst);
 
@@ -50,21 +50,14 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, VTTBR);
 }
 
-__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
-
-static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	__tlb_flush_vmid(kvm);
+	__kvm_tlb_flush_vmid(kvm);
 }
 
-__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
-							    phys_addr_t ipa);
-
-static void __hyp_text __tlb_flush_vm_context(void)
+void __hyp_text __kvm_flush_vm_context(void)
 {
 	write_sysreg(0, TLBIALLNSNHIS);
 	write_sysreg(0, ICIALLUIS);
 	dsb(ish);
 }
-
-__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
--
cgit

From 427d7cacf97220844aa39146e11365655bbff8bd Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 09:28:42 +0100
Subject: arm64: KVM: Move the AArch32 conditional execution to common code

It would make some sense to share the conditional execution code
between 32 and 64bit. In order to achieve this, let's move that code
to virt/kvm/arm/aarch32.c.

While we're at it, drop a superfluous BUG_ON() that wasn't that
useful.

Following patches will migrate the 32bit port to that code base.
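For reference, the code being moved manipulates the Thumb ITSTATE
field, which is split across the CPSR. An illustrative helper (not part
of the patch) that gathers IT[7:0] back together, mirroring the shifts
used in kvm_condition_valid():

    /* Sketch only: IT[7:2] lives in CPSR[15:10], IT[1:0] in CPSR[26:25]. */
    static unsigned long cpsr_get_it(unsigned long cpsr)
    {
    	return ((cpsr >> 8) & 0xfc) | ((cpsr >> 25) & 0x3);
    }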
Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/emulate.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index af93e3ffc9f3..eda9ddd03e7c 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -221,9 +221,7 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	bool is_arm = !(cpsr & PSR_T_BIT);
 
-	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
-
-	if (!(cpsr & PSR_IT_MASK))
+	if (is_arm || !(cpsr & PSR_IT_MASK))
 		return;
 
 	cond = (cpsr & 0xe000) >> 13;
--
cgit

From 3aedd5c49e63c0d31a53b00ab906d48f53abb68b Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 09:28:43 +0100
Subject: arm: KVM: Use common AArch32 conditional execution code

Add the bit of glue and const-ification that is required to use the
code inherited from the arm64 port, and move over to it.

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_emulate.h | 34 ++++++++++---
 arch/arm/kvm/Makefile              |  1 +
 arch/arm/kvm/emulate.c             | 97 --------------------------------------
 3 files changed, 28 insertions(+), 104 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index ee5328fc4b06..448d63cdcc3d 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,18 +40,28 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 	*vcpu_reg(vcpu, reg_num) = val;
 }
 
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+	return kvm_condition_valid32(vcpu);
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+	kvm_skip_instr32(vcpu, is_wide_instr);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.hcr;
 }
@@ -61,7 +71,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
 	vcpu->arch.hcr = hcr;
 }
 
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return 1;
 }
@@ -71,9 +81,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
 }
 
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
+	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +103,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
 	return cpsr_mode > USR_MODE;;
 }
 
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.hsr;
 }
 
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	if (hsr & HSR_CV)
+		return (hsr & HSR_COND) >> HSR_COND_SHIFT;
+
+	return -1;
+}
+
 static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.hxfar;
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 10d77a66cad5..339ec88a15a6 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+obj-y += $(KVM)/arm/aarch32.o
 
 obj-y += $(KVM)/arm/vgic/vgic.o
 obj-y += $(KVM)/arm/vgic/vgic-init.o
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index eda9ddd03e7c..ff9acd1b027b 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -161,103 +161,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 	}
 }
 
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
-
-	/* Top two bits non-zero? Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
- * @vcpu:	The VCPU pointer
- *
- * When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
- * to do this little bit of work manually. The fields map like this:
- *
- * IT[7:0] -> CPSR[26:25],CPSR[15:10]
- */
-static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
-{
-	unsigned long itbits, cond;
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_arm = !(cpsr & PSR_T_BIT);
-
-	if (is_arm || !(cpsr & PSR_IT_MASK))
-		return;
-
-	cond = (cpsr & 0xe000) >> 13;
-	itbits = (cpsr & 0x1c00) >> (10 - 2);
-	itbits |= (cpsr & (0x3 << 25)) >> 25;
-
-	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
-	if ((itbits & 0x7) == 0)
-		itbits = cond = 0;
-	else
-		itbits = (itbits << 1) & 0x1f;
-
-	cpsr &= ~PSR_IT_MASK;
-	cpsr |= cond << 13;
-	cpsr |= (itbits & 0x1c) << (10 - 2);
-	cpsr |= (itbits & 0x3) << 25;
-	*vcpu_cpsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
- */
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-	bool is_thumb;
-
-	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
-	if (is_thumb && !is_wide_instr)
-		*vcpu_pc(vcpu) += 2;
-	else
-		*vcpu_pc(vcpu) += 4;
-	kvm_adjust_itstate(vcpu);
-}
-
-
 /******************************************************************************
  * Inject exceptions into the guest
  */
--
cgit

From 1f7e378d1235a3cb5c9cdd8c62fe13cb6c3cf157 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:08 +0100
Subject: arm: KVM: Preserve pending Virtual Abort in world switch

The HCR.VA bit is used to signal an Abort to a guest, and has the
peculiar feature of getting cleared when the guest has taken the abort
(this is the only bit that behaves as such in this register).

This means that if we signal such an abort, we must leave it in the
guest context until it disappears from HCR, at which point it must be
cleared from the context.

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/hyp/switch.c | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 37b336549700..9da16fd1005f 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -54,6 +54,15 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 {
 	u32 val;
 
+	/*
+	 * If we pended a virtual abort, preserve it until it gets
+	 * cleared. See B1.9.9 (Virtual Abort exception) for details,
+	 * but the crucial bit is the zeroing of HCR.VA in the
+	 * pseudocode.
+	 */
+	if (vcpu->arch.hcr & HCR_VA)
+		vcpu->arch.hcr = read_sysreg(HCR);
+
 	write_sysreg(0, HCR);
 	write_sysreg(0, HSTR);
 	val = read_sysreg(HDCR);
--
cgit

From bfb78b5c98d0e6b6dbd9af9cd71e853670a34044 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:09 +0100
Subject: arm: KVM: Add Virtual Abort injection helper

Now that we're able to context switch the HCR.VA bit, let's
introduce a helper that injects an Abort into a vcpu.
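A compact user-space model of the HCR.VA life cycle that this helper
and the preceding world-switch patch rely on (illustrative only; the
bit position is an assumption made for the sketch):

    #include <assert.h>

    #define HCR_VA (1u << 8)	/* assumed encoding, for illustration only */

    int main(void)
    {
    	unsigned int shadow_hcr = 0;

    	shadow_hcr |= HCR_VA;		/* kvm_inject_vabt(): pend the abort */

    	/* Guest takes the abort: hardware clears VA in the live register. */
    	unsigned int live_hcr = shadow_hcr & ~HCR_VA;

    	/* __deactivate_traps(): resync the shadow copy while VA is pending. */
    	if (shadow_hcr & HCR_VA)
    		shadow_hcr = live_hcr;

    	assert(!(shadow_hcr & HCR_VA));
    	return 0;
    }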
Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_emulate.h |  1 +
 arch/arm/kvm/emulate.c             | 12 ++++++++++++
 2 files changed, 13 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 448d63cdcc3d..9a8a45aaf19a 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -43,6 +43,7 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index ff9acd1b027b..0064b86a2c87 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -303,3 +303,15 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	inject_abt(vcpu, true, addr);
 }
+
+/**
+ * kvm_inject_vabt - inject an async abort / SError into the guest
+ * @vcpu: The VCPU to receive the exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_vabt(struct kvm_vcpu *vcpu)
+{
+	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+}
--
cgit

From cc325cfc37b5b9e8814d7172e3a6e47d3b098ff1 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:10 +0100
Subject: arm: KVM: Add HYP async abort handler

If we've exited the guest because it has triggered an asynchronous
abort, a possible course of action is to let it know it screwed up
by giving it a Virtual Abort to chew on.

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/handle_exit.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3f1ef0dbc899..863fdf42668e 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -160,6 +160,9 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 		exit_handler = kvm_get_exit_handler(vcpu);
 		return exit_handler(vcpu, run);
+	case ARM_EXCEPTION_DATA_ABORT:
+		kvm_inject_vabt(vcpu);
+		return 1;
 	default:
 		kvm_pr_unimpl("Unsupported exception type: %d",
 			      exception_index);
--
cgit

From 435bca5fe913e2e5f96881a77ab20e653f5ad894 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:11 +0100
Subject: arm: KVM: Allow an exit code to be tagged with a Virtual Abort

An asynchronous abort can also be triggered whilst running at EL2.
But instead of making that a new error code, we need to communicate
it to the rest of KVM together with the exit reason. So let's hijack
a single bit that allows the exception code to be tagged with
"pending Abort" information.
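A self-contained check of the tag/untag round-trip implemented by the
macros below (the example exit-code value is an assumption made for
illustration):

    #include <assert.h>

    #define ARM_EXIT_WITH_ABORT_BIT  31
    #define ARM_EXCEPTION_CODE(x)    ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
    #define ARM_ABORT_PENDING(x)     (!!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT)))

    int main(void)
    {
    	unsigned int code = 4;	/* e.g. a data-abort exit code */
    	unsigned int tagged = code | (1U << ARM_EXIT_WITH_ABORT_BIT);

    	assert(ARM_ABORT_PENDING(tagged));
    	assert(ARM_EXCEPTION_CODE(tagged) == code);
    	assert(!ARM_ABORT_PENDING(code));
    	return 0;
    }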
Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_asm.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 58faff5f1eb2..05e47faf96a1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -21,6 +21,10 @@
 
 #include
 
+#define ARM_EXIT_WITH_ABORT_BIT  31
+#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
+#define ARM_ABORT_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
+
 #define ARM_EXCEPTION_RESET	  0
 #define ARM_EXCEPTION_UNDEFINED   1
 #define ARM_EXCEPTION_SOFTWARE    2
--
cgit

From c39798f471259dacf56df6bfb2bd071dfe074486 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:12 +0100
Subject: arm: KVM: Handle async aborts delivered while at HYP

Just like for arm64, we can handle asynchronous aborts being delivered
at HYP while being caused by the guest. We use the exact same method
to catch such an abort, and soldier on.

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/hyp/entry.S     | 31 +++++++++++++++++++++++++++++++
 arch/arm/kvm/hyp/hyp-entry.S | 16 +++++++++++++++-
 2 files changed, 46 insertions(+), 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
index 21c238871c9e..60783f3b57cc 100644
--- a/arch/arm/kvm/hyp/entry.S
+++ b/arch/arm/kvm/hyp/entry.S
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 	.arch_extension     virt
 
@@ -63,6 +64,36 @@ ENTRY(__guest_exit)
 	ldr	lr, [r0, #4]
 
 	mov	r0, r1
+	mrs	r1, SPSR
+	mrs	r2, ELR_hyp
+	mrc	p15, 4, r3, c5, c2, 0	@ HSR
+
+	/*
+	 * Force loads and stores to complete before unmasking aborts
+	 * and forcing the delivery of the exception. This gives us a
+	 * single instruction window, which the handler will try to
+	 * match.
+	 */
+	dsb	sy
+	cpsie	a
+
+	.global	abort_guest_exit_start
+abort_guest_exit_start:
+
+	isb
+
+	.global	abort_guest_exit_end
+abort_guest_exit_end:
+
+	/*
+	 * If we took an abort, r0[31] will be set, and cmp will set
+	 * the N bit in PSTATE.
+	 */
+	cmp	r0, #0
+	msrmi	SPSR_cxsf, r1
+	msrmi	ELR_hyp, r2
+	mcrmi	p15, 4, r3, c5, c2, 0	@ HSR
+
 	bx	lr
 ENDPROC(__guest_exit)
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 78091383a5d9..96beb53934c9 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -81,7 +81,6 @@ __kvm_hyp_vector:
 	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
 	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
 	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
-	invalid_vector	hyp_dabt	ARM_EXCEPTION_DATA_ABORT
 	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ
 
 ENTRY(__hyp_do_panic)
@@ -164,6 +163,21 @@ hyp_irq:
 	load_vcpu r0			@ Load VCPU pointer to r0
 	b	__guest_exit
 
+hyp_dabt:
+	push	{r0, r1}
+	mrs	r0, ELR_hyp
+	ldr	r1, =abort_guest_exit_start
+THUMB(	add	r1, r1, #1)
+	cmp	r0, r1
+	ldrne	r1, =abort_guest_exit_end
+THUMB(	addne	r1, r1, #1)
+	cmpne	r0, r1
+	pop	{r0, r1}
+	bne	__hyp_panic
+
+	orr	r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
+	eret
+
 	.ltorg
 
 .popsection
--
cgit

From 5d7bcf7d644a88183fb34fca12ad6ea40a8db7ab Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:13 +0100
Subject: arm: KVM: Inject a Virtual Abort if it was pending

If we have caught an Abort whilst exiting, we've tagged the exit
code with the pending information. In that case, let's re-inject
the error into the guest, after having adjusted the PC if required.
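A worked example of the rollback (illustrative addresses and names, not
part of the patch): an HVC that trapped from 0x8000 leaves the PC
already advanced past the instruction, so the handler subtracts the
instruction length before injecting the abort:

    /* Sketch only: undo the PC skip applied on an HVC/SMC exit. */
    static unsigned long unskip_pc(unsigned long pc, int is32bit)
    {
    	return pc - (is32bit ? 4 : 2);
    }
    /* unskip_pc(0x8004, 1) == 0x8000: back on the HVC that trapped. */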
Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/handle_exit.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 863fdf42668e..4eacb5cb60e8 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -144,6 +144,25 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 {
 	exit_handle_fn exit_handler;
 
+	if (ARM_ABORT_PENDING(exception_index)) {
+		u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+		/*
+		 * HVC/SMC already have an adjusted PC, which we need
+		 * to correct in order to return to after having
+		 * injected the abort.
+		 */
+		if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
+			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
+			*vcpu_pc(vcpu) -= adj;
+		}
+
+		kvm_inject_vabt(vcpu);
+		return 1;
+	}
+
+	exception_index = ARM_EXCEPTION_CODE(exception_index);
+
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
 		return 1;
--
cgit

From e656a1f91e701e6a1eddf8c029b45639b60877d5 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:14 +0100
Subject: arm: KVM: Drop unreachable HYP abort handlers

Both data and prefetch aborts occurring in HYP lead to a well deserved
panic. Let's get rid of these silly handlers.

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/handle_exit.c | 27 ---------------------------
 1 file changed, 27 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4eacb5cb60e8..4e40d1955e35 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -28,14 +28,6 @@
 
 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
 
-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* SVC called from Hyp mode should never get here */
-	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-	BUG();
-	return -EINVAL; /* Squash warning */
-}
-
 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
@@ -59,22 +51,6 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
-		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
-	return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
-		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
-	return -EFAULT;
-}
-
 /**
  * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
  * @vcpu:	the vcpu pointer
@@ -112,13 +88,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
-	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
 	[HSR_EC_HVC]		= handle_hvc,
 	[HSR_EC_SMC]		= handle_smc,
 	[HSR_EC_IABT]		= kvm_handle_guest_abort,
-	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
 	[HSR_EC_DABT]		= kvm_handle_guest_abort,
-	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
--
cgit

From 4055710baca8cb067b059a635cf851eb86410a83 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:15 +0100
Subject: arm/arm64: KVM: Inject virtual abort when guest exits on external abort

If we spot a data abort bearing the ESR_EL2.EA bit set, we know that
this is an external abort, and that should be punished by the injection
of an abort.

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/mmu.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 344755dcc3b2..60e0c1ac86e8 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1432,6 +1432,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int ret, idx;
 
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+	if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
+		kvm_inject_vabt(vcpu);
+		return 1;
+	}
+
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 
 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
--
cgit

From 21977a4c57cd92c71113e21319302b5a399f9d2d Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 6 Sep 2016 14:02:16 +0100
Subject: arm/arm64: KVM: Remove external abort test from MMIO handling

As we now handle external aborts pretty early, we can get rid of their
handling in the MMIO code (which was a bit odd to begin with...).

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/mmio.c | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 10f80a6c797a..b6e715fd3c90 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -126,12 +126,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
 	int access_size;
 	bool sign_extend;
 
-	if (kvm_vcpu_dabt_isextabt(vcpu)) {
-		/* cache operation on I/O addr, tell guest unsupported */
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		return 1;
-	}
-
 	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
 		/* page table accesses IO mem: tell guest to fix its TTBR */
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
--
cgit

From 235539b48a2357da28f52d66d04bec04f3dcb9dd Mon Sep 17 00:00:00 2001
From: Luiz Capitulino
Date: Wed, 7 Sep 2016 14:47:23 -0400
Subject: kvm: add stubs for arch specific debugfs support

Two stubs are added:

 o kvm_arch_has_vcpu_debugfs(): must return true if the arch
   supports creating debugfs entries in the vcpu debugfs dir
   (which will be implemented by the next commit)

 o kvm_arch_create_vcpu_debugfs(): code that creates debugfs
   entries in the vcpu debugfs dir

For x86, this commit introduces a new file to avoid growing
arch/x86/kvm/x86.c even more.
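A sketch of the call site that such stubs imply in generic code (an
assumption for illustration — the actual wiring lands in a later commit
outside this arm-only view):

    /* Sketch only: how generic KVM code might consume the two stubs. */
    if (kvm_arch_has_vcpu_debugfs()) {
    	int ret = kvm_arch_create_vcpu_debugfs(vcpu);

    	if (ret)
    		pr_warn("Failed to create vCPU debugfs entries\n");
    }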
Signed-off-by: Luiz Capitulino
Signed-off-by: Paolo Bonzini
---
 arch/arm/kvm/arm.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 75f130ef6504..c638935baad6 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -144,6 +144,16 @@ out_fail_alloc:
 	return ret;
 }
 
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+	return false;
+}
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
--
cgit

From 163352eb6827e5b556b0860edc19327798fcc5ff Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Mon, 12 Sep 2016 15:49:21 +0100
Subject: ARM: Introduce MPIDR_LEVEL_SHIFT macro

The vgic-v3 driver uses the architecture specific MPIDR_LEVEL_SHIFT
macro to encode the affinity in a form compatible with the ICC_SGI*
registers. Unfortunately, that macro is missing on ARM, so let's add
it.

Cc: Russell King
Acked-by: Marc Zyngier
Signed-off-by: Vladimir Murzin
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/cputype.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 1ee94c716a7f..e2d94c1b07b8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -55,6 +55,7 @@
 
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
 
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
 	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
--
cgit

From 4f2546384150e78cad8045e59a9587fabcd9f9fe Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Mon, 12 Sep 2016 15:49:22 +0100
Subject: ARM: Move system register accessors to asm/cp15.h

Headers linux/irqchip/arm-gic.v3.h and arch/arm/include/asm/kvm_hyp.h
are included in virt/kvm/arm/hyp/vgic-v3-sr.c and both define macros
called __ACCESS_CP15 and __ACCESS_CP15_64, which obviously creates a
conflict. These macros were introduced independently for GIC and KVM
and, in fact, do the same thing.

As an option we could add prefixes to the KVM and GIC versions of the
macros so they won't clash, but that would introduce code duplication.
Alternatively, we could keep the macros in, say, the GIC header and
include it in the KVM one (or vice versa), but such a dependency would
not look any nicer.
So we follow the arm64 way (it handles this via sysreg.h) and move a
single set of macros to asm/cp15.h.

Cc: Russell King
Acked-by: Marc Zyngier
Signed-off-by: Vladimir Murzin
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/arch_gicv3.h | 27 +++++++++++----------------
 arch/arm/include/asm/cp15.h       | 15 +++++++++++++++
 arch/arm/include/asm/kvm_hyp.h    | 15 +--------------
 3 files changed, 27 insertions(+), 30 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index e08d15184056..af25c32b1ccc 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,9 +22,7 @@
 
 #include
 #include
-
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	p15, Op1, %0, CRn, CRm, Op2
-#define __ACCESS_CP15_64(Op1, CRm)		p15, Op1, %Q0, %R0, CRm
+#include
 
 #define ICC_EOIR1			__ACCESS_CP15(c12, 0, c12, 1)
 #define ICC_DIR				__ACCESS_CP15(c12, 0, c11, 1)
@@ -102,58 +100,55 @@
 
 static inline void gic_write_eoir(u32 irq)
 {
-	asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+	write_sysreg(irq, ICC_EOIR1);
 	isb();
 }
 
 static inline void gic_write_dir(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+	write_sysreg(val, ICC_DIR);
 	isb();
 }
 
 static inline u32 gic_read_iar(void)
 {
-	u32 irqstat;
+	u32 irqstat = read_sysreg(ICC_IAR1);
 
-	asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
 	dsb(sy);
+
 	return irqstat;
 }
 
 static inline void gic_write_pmr(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+	write_sysreg(val, ICC_PMR);
 }
 
 static inline void gic_write_ctlr(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+	write_sysreg(val, ICC_CTLR);
 	isb();
 }
 
 static inline void gic_write_grpen1(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+	write_sysreg(val, ICC_IGRPEN1);
 	isb();
 }
 
 static inline void gic_write_sgi1r(u64 val)
 {
-	asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+	write_sysreg(val, ICC_SGI1R);
 }
 
 static inline u32 gic_read_sre(void)
 {
-	u32 val;
-
-	asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
-	return val;
+	return read_sysreg(ICC_SRE);
 }
 
 static inline void gic_write_sre(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+	write_sysreg(val, ICC_SRE);
 	isb();
 }
 
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index c3f11524f10c..dbdbce1b3a72 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -49,6 +49,21 @@
 
 #ifdef CONFIG_CPU_CP15
 
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+	"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({				\
+	t __val;						\
+	asm volatile(r " " c : "=r" (__val));			\
+	__val;							\
+})
+#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
+
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 6eaff28f2ff3..e604ad68a06b 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -20,28 +20,15 @@
 
 #include
 #include
+#include
 #include
 #include
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
-	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm)		\
-	"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
 #define __ACCESS_VFP(CRn)			\
 	"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
 
-#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
-
-#define __read_sysreg(r, w, c, t) ({				\
-	t __val;						\
-	asm volatile(r " " c : "=r" (__val));			\
-	__val;							\
-})
-#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)
-
 #define write_special(v, r)					\
 	asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
 #define read_special(r) ({					\
--
cgit

From a078bedf17c2e43819fea54bdfd5793845142e3a Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Mon, 12 Sep 2016 15:49:23 +0100
Subject: ARM: gic-v3: Introduce 32-to-64-bit mappings for GICv3 cpu registers

The vgic-v3 save/restore routines are written in such a way that they
map the arm64 system register naming nicely, but that does not fit the
arm world. To keep virt/kvm/arm/hyp/vgic-v3-sr.c untouched, we create a
mapping with a function for each register, mapping the 32-bit accessors
to the 64-bit names.

Please note that the 64-bit wide ICH_LR is split into two 32-bit halves
(ICH_LR and ICH_LRC) accessed independently.

Acked-by: Marc Zyngier
Signed-off-by: Vladimir Murzin
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/arch_gicv3.h | 64 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index af25c32b1ccc..996848efdaa9 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -96,6 +96,70 @@
 #define ICH_AP1R2			__AP1Rx(2)
 #define ICH_AP1R3			__AP1Rx(3)
 
+/* A32-to-A64 mappings used by VGIC save/restore */
+
+#define CPUIF_MAP(a32, a64)			\
+static inline void write_ ## a64(u32 val)	\
+{						\
+	write_sysreg(val, a32);			\
+}						\
+static inline u32 read_ ## a64(void)		\
+{						\
+	return read_sysreg(a32);		\
+}						\
+
+#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64)	\
+static inline void write_ ## a64(u64 val)	\
+{						\
+	write_sysreg(lower_32_bits(val), a32lo);\
+	write_sysreg(upper_32_bits(val), a32hi);\
+}						\
+static inline u64 read_ ## a64(void)		\
+{						\
+	u64 val = read_sysreg(a32lo);		\
+						\
+	val |= (u64)read_sysreg(a32hi) << 32;	\
+						\
+	return val;				\
+}
+
+CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
+CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
+CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
+CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
+CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
+CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
+CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
+CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
+CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
+CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
+CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
+CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
+CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
+CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
+CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+
+CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
+CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
+CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
+CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
+CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
+CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
+CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
+CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
+CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
+CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
+CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
+CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
+CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
+CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
+CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
+CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+
+#define read_gicreg(r)			read_##r()
+#define write_gicreg(v, r)		write_##r(v)
+
 /* Low-level accessors */
 
 static inline void gic_write_eoir(u32 irq)
--
cgit

From acda5430bee4621f218391d0bcfbe4412adb3554 Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Mon, 12 Sep 2016 15:49:24 +0100
Subject: ARM: KVM: Support vgic-v3

This patch allows building and using vgic-v3 in 32-bit mode.

Unfortunately, it cannot be split into several steps without extra
stubs to keep the patches independent and bisectable. For instance,
virt/kvm/arm/vgic/vgic-v3.c uses functions from vgic-v3-sr.c, and
handling access to the GICv3 cpu interface from the guest requires
vgic_v3.vgic_sre to be already defined.

This is how the support has been done:

 * handle SGI requests from the guest

 * report the configured SRE on access to the GICv3 cpu interface from
   the guest

 * required vgic-v3 macros are provided via uapi.h

 * static keys are used to select the GIC backend

 * to make vgic-v3 build, the KVM_ARM_VGIC_V3 guard is removed along
   with the static inlines

Acked-by: Marc Zyngier
Reviewed-by: Christoffer Dall
Signed-off-by: Vladimir Murzin
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_asm.h  |  3 +++
 arch/arm/include/asm/kvm_host.h |  5 +++++
 arch/arm/include/asm/kvm_hyp.h  |  3 +++
 arch/arm/include/uapi/asm/kvm.h |  7 +++++++
 arch/arm/kvm/Makefile           |  2 ++
 arch/arm/kvm/coproc.c           | 35 +++++++++++++++++++++++++++++++++++
 arch/arm/kvm/hyp/Makefile       |  1 +
 arch/arm/kvm/hyp/switch.c       | 12 ++++++++++--
 8 files changed, 66 insertions(+), 2 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 05e47faf96a1..d7ea6bcb29bf 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -72,6 +72,9 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern void __init_stage2_translation(void);
 
 extern void __kvm_hyp_reset(unsigned long);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index de338d93d11b..c2c40a7ee738 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,7 +39,12 @@
 
 #include
 
+#ifdef CONFIG_ARM_GIC_V3
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+#else
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#endif
+
 #define KVM_REQ_VCPU_EXIT	8
 
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index e604ad68a06b..343135ede5fa 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -106,6 +106,9 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 void __sysreg_save_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
 
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
 void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
 void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
 static inline bool __vfp_enabled(void)
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index a2b3eb313a25..b38c10c73579 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@ struct kvm_regs {
 #define KVM_VGIC_V2_DIST_SIZE		0x1000
 #define KVM_VGIC_V2_CPU_SIZE		0x2000
 
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST	2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST	3
+
+#define KVM_VGIC_V3_DIST_SIZE		SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE		(2 * SZ_64K)
+
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
 
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 339ec88a15a6..f19842ea5418 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -27,8 +27,10 @@ obj-y += $(KVM)/arm/vgic/vgic.o
 obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
 obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
 obj-y += $(KVM)/irqchip.o
 obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 1bb2b79c01ff..3e5e4194ef86 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -228,6 +228,35 @@ bool access_vm_reg(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+			   const struct coproc_params *p,
+			   const struct coproc_reg *r)
+{
+	u64 reg;
+
+	if (!p->is_write)
+		return read_from_write_only(vcpu, p);
+
+	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
+	reg |= *vcpu_reg(vcpu, p->Rt1) ;
+
+	vgic_v3_dispatch_sgi(vcpu, reg);
+
+	return true;
+}
+
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+			   const struct coproc_params *p,
+			   const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+
+	return true;
+}
+
 /*
  * We could trap ID_DFR0 and tell the guest we don't support performance
  * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
@@ -361,10 +390,16 @@ static const struct coproc_reg cp15_regs[] = {
 	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
 			access_vm_reg, reset_unknown, c10_AMAIR1},
 
+	/* ICC_SGI1R */
+	{ CRm64(12), Op1( 0), is64, access_gic_sgi},
+
 	/* VBAR: swapped by interrupt.S. */
 	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
 			NULL, reset_val, c12_VBAR, 0x00000000 },
 
+	/* ICC_SRE */
+	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
+
 	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
 	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
 			access_vm_reg, reset_val, c13_CID, 0x00000000 },
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 8dfa5f7f9290..3023bb530edf 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -5,6 +5,7 @@
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
 
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 9da16fd1005f..92678b7bd046 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -14,6 +14,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see .
  */
+#include
 
 #include
 #include
@@ -83,14 +84,21 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 	write_sysreg(read_sysreg(MIDR), VPIDR);
 }
 
+
 static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
-	__vgic_v2_save_state(vcpu);
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		__vgic_v3_save_state(vcpu);
+	else
+		__vgic_v2_save_state(vcpu);
 }
 
 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-	__vgic_v2_restore_state(vcpu);
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		__vgic_v3_restore_state(vcpu);
+	else
+		__vgic_v2_restore_state(vcpu);
 }
 
 static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
--
cgit

From 6134993789b76d85102d68a286b8b82cb53b52cb Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Thu, 22 Sep 2016 09:18:01 +0100
Subject: arm64: KVM: Remove duplicating init code for setting VMID

By now both the VHE and non-VHE initialisation sequences query the
supported VMID size. Let's keep only a single instance of this code,
under init_common_resources().

Acked-by: Marc Zyngier
Signed-off-by: Vladimir Murzin
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/arm.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 75f130ef6504..85a3f90cb8ec 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1178,6 +1178,10 @@ static int init_common_resources(void)
 		return -ENOMEM;
 	}
 
+	/* set size of VMID supported by CPU */
+	kvm_vmid_bits = kvm_get_vmid_bits();
+	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
 	return 0;
 }
 
@@ -1243,10 +1247,6 @@ static void teardown_hyp_mode(void)
 
 static int init_vhe_mode(void)
 {
-	/* set size of VMID supported by CPU */
-	kvm_vmid_bits = kvm_get_vmid_bits();
-	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
 	kvm_info("VHE mode initialized successfully\n");
 	return 0;
 }
@@ -1330,10 +1330,6 @@ static int init_hyp_mode(void)
 		}
 	}
 
-	/* set size of VMID supported by CPU */
-	kvm_vmid_bits = kvm_get_vmid_bits();
-	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
 	kvm_info("Hyp mode initialized successfully\n");
 
 	return 0;
--
cgit

From 3d9cd95f90b2987ef95182a4340a9150e06c4253 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 23 Sep 2016 14:23:43 +0100
Subject: ARM: gic-v3: Work around definition of gic_write_bpr1

A new accessor for gic_write_bpr1 is added to arch_gicv3.h in 4.9,
whilst the CP15 accessors are redefined in a separate branch. This
leads to a horrible clash, where the new accessor ends up with a
crap "asm volatile" definition.

Work around this by carrying our own definition of gic_write_bpr1,
creating a small conflict which will be obvious to resolve.
Signed-off-by: Marc Zyngier
---
 arch/arm/include/asm/arch_gicv3.h | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 996848efdaa9..1fee657d3827 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -216,6 +216,15 @@ static inline void gic_write_sre(u32 val)
 	isb();
 }
 
+static inline void gic_write_bpr1(u32 val)
+{
+#if defined(__write_sysreg) && defined(ICC_BPR1)
+	write_sysreg(val, ICC_BPR1);
+#else
+	asm volatile("mcr " __stringify(ICC_BPR1) : : "r" (val));
+#endif
+}
+
 /*
  * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
  * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
--
cgit
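For reference, the two-level macro trick used by the asm/cp15.h
accessors (and relied upon by the workaround above) can be modeled
entirely in user space. This sketch only prints the mnemonic and
encoding that would be spliced into the inline asm; it is not the
kernel's code:

    #include <stdio.h>

    #define __stringify_1(x...)	#x
    #define __stringify(x...)	__stringify_1(x)

    typedef unsigned int u32;

    /* Same shape as asm/cp15.h: mnemonic pair, encoding string, type. */
    #define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
    	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32

    #define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3)

    /* The variadic indirection lets ICC_BPR1 expand into four arguments. */
    #define __write_sysreg(v, r, w, c, t)	printf("%s %s <- %u\n", w, c, (t)(v))
    #define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)

    int main(void)
    {
    	write_sysreg(5, ICC_BPR1);	/* prints: mcr p15, 0, %0, c12, c12, 3 <- 5 */
    	return 0;
    }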