From 36ee41d161c67a6fcf696d4817a0da31f778938c Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Tue, 30 Jan 2018 10:51:32 +1100
Subject: KVM: PPC: Book3S HV: Drop locks before reading guest memory

Running with CONFIG_DEBUG_ATOMIC_SLEEP reveals that HV KVM tries to
read guest memory, in order to emulate guest instructions, while
preempt is disabled and a vcore lock is held.  This occurs in
kvmppc_handle_exit_hv(), called from post_guest_process(), when
emulating guest doorbell instructions on POWER9 systems, and also when
checking whether we have hit a hypervisor breakpoint.  Reading guest
memory can cause a page fault and thus cause the task to sleep, so we
need to avoid reading guest memory while holding a spinlock or when
preempt is disabled.

To fix this, we move the preempt_enable() in kvmppc_run_core() to
before the loop that calls post_guest_process() for each vcore that has
just run, and we drop and re-take the vcore lock around the calls to
kvmppc_emulate_debug_inst() and kvmppc_emulate_doorbell_instr().

Dropping the lock is safe with respect to the iteration over the
runnable vcpus in post_guest_process(); for_each_runnable_thread is
actually safe to use locklessly.  It is possible for a vcpu to become
runnable and add itself to the runnable_threads array (code near the
beginning of kvmppc_run_vcpu()) and then get included in the iteration
in post_guest_process despite the fact that it has not just run.  This
is benign because vcpu->arch.trap and vcpu->arch.ceded will be zero.

Cc: stable@vger.kernel.org # v4.13+
Fixes: 579006944e0d ("KVM: PPC: Book3S HV: Virtualize doorbell facility on POWER9")
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_hv.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e5f81fc108e0..aa6130b56b5e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1008,8 +1008,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
         struct kvm *kvm = vcpu->kvm;
         struct kvm_vcpu *tvcpu;
 
-        if (!cpu_has_feature(CPU_FTR_ARCH_300))
-                return EMULATE_FAIL;
         if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
                 return RESUME_GUEST;
         if (get_op(inst) != 31)
@@ -1059,6 +1057,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
         return RESUME_GUEST;
 }
 
+/* Called with vcpu->arch.vcore->lock held */
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  struct task_struct *tsk)
 {
@@ -1179,7 +1178,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         swab32(vcpu->arch.emul_inst) :
                         vcpu->arch.emul_inst;
                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+                        /* Need vcore unlocked to call kvmppc_get_last_inst */
+                        spin_unlock(&vcpu->arch.vcore->lock);
                         r = kvmppc_emulate_debug_inst(run, vcpu);
+                        spin_lock(&vcpu->arch.vcore->lock);
                 } else {
                         kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                         r = RESUME_GUEST;
@@ -1194,8 +1196,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
          */
         case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
                 r = EMULATE_FAIL;
-                if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
+                if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
+                    cpu_has_feature(CPU_FTR_ARCH_300)) {
+                        /* Need vcore unlocked to call kvmppc_get_last_inst */
+                        spin_unlock(&vcpu->arch.vcore->lock);
                         r = kvmppc_emulate_doorbell_instr(vcpu);
+                        spin_lock(&vcpu->arch.vcore->lock);
+                }
                 if (r == EMULATE_FAIL) {
                         kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                         r = RESUME_GUEST;
@@ -2946,13 +2953,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         /* make sure updates to secondary vcpu structs are visible now */
         smp_mb();
 
+        preempt_enable();
+
         for (sub = 0; sub < core_info.n_subcores; ++sub) {
                 pvc = core_info.vc[sub];
                 post_guest_process(pvc, pvc == vc);
         }
 
         spin_lock(&vc->lock);
-        preempt_enable();
 
 out:
         vc->vcore_state = VCORE_INACTIVE;
--
cgit

From 07ae5389e98c53bb9e9f308fce9c903bc3ee7720 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Wed, 31 Jan 2018 22:24:58 +0100
Subject: KVM: PPC: Book3S PR: Fix svcpu copying with preemption enabled

When copying between the vcpu and svcpu, we may get scheduled away onto
a different host CPU which in turn means our svcpu pointer may change.

That means we need to atomically copy to and from the svcpu with
preemption disabled, so that all code around it always sees a coherent
state.

Reported-by: Simon Guo
Fixes: 3d3319b45eea ("KVM: PPC: Book3S: PR: Enable interrupts earlier")
Signed-off-by: Alexander Graf
Signed-off-by: Paul Mackerras
---
 arch/powerpc/include/asm/kvm_book3s.h |  6 ++----
 arch/powerpc/kvm/book3s_interrupts.S  |  4 +---
 arch/powerpc/kvm/book3s_pr.c          | 20 +++++++++-----------
 3 files changed, 12 insertions(+), 18 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9a667007bff8..376ae803b69c 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
 extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
-extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-                                 struct kvm_vcpu *vcpu);
-extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-                                   struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 901e6fe00c39..c18e845019ec 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -96,7 +96,7 @@ kvm_start_entry:
 
 kvm_start_lightweight:
         /* Copy registers into shadow vcpu so we can access them in real mode */
-        GET_SHADOW_VCPU(r3)
+        mr      r3, r4
         bl      FUNC(kvmppc_copy_to_svcpu)
         nop
         REST_GPR(4, r1)
@@ -165,9 +165,7 @@ after_sprg3_load:
         stw     r12, VCPU_TRAP(r3)
 
         /* Transfer reg values from shadow vcpu back to vcpu struct */
-        /* On 64-bit, interrupts are still off at this point */
-        GET_SHADOW_VCPU(r4)
         bl      FUNC(kvmppc_copy_from_svcpu)
         nop
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d0dc8624198f..d6df74a2f744 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -120,7 +120,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_BOOK3S_64
         struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
         if (svcpu->in_use) {
-                kvmppc_copy_from_svcpu(vcpu, svcpu);
+                kvmppc_copy_from_svcpu(vcpu);
         }
         memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
         to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
@@ -142,9 +142,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 }
 
 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-                          struct kvm_vcpu *vcpu)
+void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 {
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
         svcpu->gpr[0] = vcpu->arch.gpr[0];
         svcpu->gpr[1] = vcpu->arch.gpr[1];
         svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -176,17 +177,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
         if (cpu_has_feature(CPU_FTR_ARCH_207S))
                 vcpu->arch.entry_ic = mfspr(SPRN_IC);
         svcpu->in_use = true;
+
+        svcpu_put(svcpu);
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-                            struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 {
-        /*
-         * vcpu_put would just call us again because in_use hasn't
-         * been updated yet.
-         */
-        preempt_disable();
+        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
         /*
          * Maybe we were already preempted and synced the svcpu from
@@ -232,7 +230,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
         svcpu->in_use = false;
 
 out:
-        preempt_enable();
+        svcpu_put(svcpu);
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
--
cgit

From 57ea5f161a7de5b1913c212d04f57a175b159fdf Mon Sep 17 00:00:00 2001
From: Ulf Magnusson
Date: Mon, 5 Feb 2018 02:21:14 +0100
Subject: KVM: PPC: Book3S PR: Fix broken select due to misspelling

Commit 76d837a4c0f9 ("KVM: PPC: Book3S PR: Don't include SPAPR TCE code
on non-pseries platforms") added a reference to the globally undefined
symbol PPC_SERIES.  Looking at the rest of the commit, PPC_PSERIES was
probably intended.  Change PPC_SERIES to PPC_PSERIES.

Discovered with the
https://github.com/ulfalizer/Kconfiglib/blob/master/examples/list_undefined.py
script.

Fixes: 76d837a4c0f9 ("KVM: PPC: Book3S PR: Don't include SPAPR TCE code on non-pseries platforms")
Cc: stable@vger.kernel.org # v4.12+
Signed-off-by: Ulf Magnusson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b12b8eb39c29..648160334abf 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -68,7 +68,7 @@ config KVM_BOOK3S_64
         select KVM_BOOK3S_64_HANDLER
         select KVM
         select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
-        select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
+        select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
         ---help---
           Support running unmodified book3s_64 and book3s_32 guest kernels
           in virtual machines on book3s_64 host processors.
--
cgit

From 05f2bb0313a2855e491dadfc8319b7da261d7074 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Wed, 7 Feb 2018 19:49:54 +1100
Subject: KVM: PPC: Book3S HV: Fix handling of secondary HPTEG in HPT resizing code

This fixes the computation of the HPTE index to use when the HPT
resizing code encounters a bolted HPTE which is stored in its
secondary HPTE group.  The code inverts the HPTE group number, which
is correct, but doesn't then mask it with new_hash_mask.  As a result,
new_pteg will be effectively negative, resulting in new_hptep pointing
before the new HPT, which will corrupt memory.

In addition, this removes two BUG_ON statements.  The condition that
the BUG_ONs were testing -- that we have computed the hash value
incorrectly -- has never been observed in testing, and if it did
occur, would only affect the guest, not the host.
Given that BUG_ON should only be used in conditions where the kernel
(i.e. the host kernel, in this case) can't possibly continue execution,
it is not appropriate here.

Reviewed-by: David Gibson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 966097232d21..d19649960bbf 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1329,12 +1329,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
         }
 
         new_pteg = hash & new_hash_mask;
-        if (vpte & HPTE_V_SECONDARY) {
-                BUG_ON(~pteg != (hash & old_hash_mask));
-                new_pteg = ~new_pteg;
-        } else {
-                BUG_ON(pteg != (hash & old_hash_mask));
-        }
+        if (vpte & HPTE_V_SECONDARY)
+                new_pteg = ~hash & new_hash_mask;
 
         new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
         new_hptep = (__be64 *)(new->virt + (new_idx << 4));
--
cgit

From 790a9df5fbef982f2a6992194fe497dd2b794a3d Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Fri, 2 Feb 2018 14:29:08 +1100
Subject: KVM: PPC: Book3S HV: Make HPT resizing work on POWER9

This adds code to enable the HPT resizing code to work on POWER9,
which uses a slightly modified HPT entry format compared to POWER8.
On POWER9, we convert HPTEs read from the HPT from the new format to
the old format so that the rest of the HPT resizing code can work as
before.  HPTEs written to the new HPT are converted to the new format
as the last step before writing them into the new HPT.

This takes out the checks added by commit bcd3bb63dbc8 ("KVM: PPC:
Book3S HV: Disable HPT resizing on POWER9 for now", 2017-02-18),
now that HPT resizing works on POWER9.

On POWER9, when we pivot to the new HPT, we now call
kvmppc_setup_partition_table() to update the partition table in order
to make the hardware use the new HPT.

[paulus@ozlabs.org - added kvmppc_setup_partition_table() call,
 wrote commit message.]
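
As an illustration of the pattern described above, the sketch below
condenses what resize_hpt_rehash_hpte() ends up doing on POWER9.  The
hpte_new_to_old_*() and hpte_old_to_new_*() helpers are the existing
kernel conversion helpers used in the diff; the surrounding function
and its arguments are a simplified, hypothetical stand-in for this
sketch, not code from the patch.

    /*
     * Illustrative sketch only -- not code from this patch.  It assumes the
     * usual KVM Book3S HV headers for cpu_has_feature() and the HPTE
     * format-conversion helpers.
     */
    static void rehash_one_hpte_sketch(__be64 *old_hptep, __be64 *new_hptep)
    {
            unsigned long vpte = be64_to_cpu(old_hptep[0]);
            unsigned long rpte = be64_to_cpu(old_hptep[1]);

            if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                    /* POWER9: convert the entry to the old format on read */
                    vpte = hpte_new_to_old_v(vpte, rpte);
                    rpte = hpte_new_to_old_r(rpte);
            }

            /* ... the pre-existing rehash logic works on old-format vpte/rpte ... */

            if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                    /* convert back as the last step before writing the new HPT */
                    rpte = hpte_old_to_new_r(vpte, rpte);
                    vpte = hpte_old_to_new_v(vpte);
            }
            new_hptep[1] = cpu_to_be64(rpte);
            new_hptep[0] = cpu_to_be64(vpte);
    }

The real function also rechecks the entry after unmapping and converts
the replacement HPTE it reads from the new HPT, as the diff below shows.
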
Tested-by: Laurent Vivier
Signed-off-by: David Gibson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 30 +++++++++++++++++++++++-------
 arch/powerpc/kvm/powerpc.c          |  3 +--
 2 files changed, 24 insertions(+), 9 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d19649960bbf..cb34be7d1a49 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1261,6 +1261,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                 /* Nothing to do */
                 goto out;
 
+        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+                rpte = be64_to_cpu(hptep[1]);
+                vpte = hpte_new_to_old_v(vpte, rpte);
+        }
+
         /* Unmap */
         rev = &old->rev[idx];
         guest_rpte = rev->guest_rpte;
@@ -1290,7 +1295,6 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 
         /* Reload PTE after unmap */
         vpte = be64_to_cpu(hptep[0]);
-
         BUG_ON(vpte & HPTE_V_VALID);
         BUG_ON(!(vpte & HPTE_V_ABSENT));
 
@@ -1299,6 +1303,12 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                 goto out;
 
         rpte = be64_to_cpu(hptep[1]);
+
+        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+                vpte = hpte_new_to_old_v(vpte, rpte);
+                rpte = hpte_new_to_old_r(rpte);
+        }
+
         pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
         avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
         pteg = idx / HPTES_PER_GROUP;
@@ -1336,6 +1346,10 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
         new_hptep = (__be64 *)(new->virt + (new_idx << 4));
 
         replace_vpte = be64_to_cpu(new_hptep[0]);
+        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+                unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
+                replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
+        }
 
         if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                 BUG_ON(new->order >= old->order);
@@ -1351,6 +1365,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                 /* Discard the previous HPTE */
         }
 
+        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+                rpte = hpte_old_to_new_r(vpte, rpte);
+                vpte = hpte_old_to_new_v(vpte);
+        }
+
         new_hptep[1] = cpu_to_be64(rpte);
         new->rev[new_idx].guest_rpte = guest_rpte;
         /* No need for a barrier, since new HPT isn't active */
@@ -1368,12 +1387,6 @@ static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
         unsigned long i;
         int rc;
 
-        /*
-         * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
-         * that POWER9 uses, and could well hit a BUG_ON on POWER9.
-         */
-        if (cpu_has_feature(CPU_FTR_ARCH_300))
-                return -EIO;
         for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
                 rc = resize_hpt_rehash_hpte(resize, i);
                 if (rc != 0)
@@ -1404,6 +1417,9 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
         synchronize_srcu_expedited(&kvm->srcu);
 
+        if (cpu_has_feature(CPU_FTR_ARCH_300))
+                kvmppc_setup_partition_table(kvm);
+
         resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
 }
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 77eb25abc601..cf86aeb43fcf 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -633,8 +633,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 r = 1;
                 break;
         case KVM_CAP_SPAPR_RESIZE_HPT:
-                /* Disable this on POWER9 until code handles new HPTE format */
-                r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
+                r = !!hv_enabled;
                 break;
 #endif
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
--
cgit

From d20fe50a7b3c8f936f7347e9cab2e9dd89c1199d Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Thu, 8 Feb 2018 18:38:53 +0100
Subject: KVM: PPC: Book3S HV: Branch inside feature section

We ended up with code that did a conditional branch inside a feature
section to code outside of the feature section.  Depending on how the
object file gets organized, that might mean we exceed the 14-bit
relocation limit for conditional branches:

  arch/powerpc/kvm/built-in.o:arch/powerpc/kvm/book3s_hv_rmhandlers.S:416:(__ftr_alt_97+0x8): relocation truncated to fit: R_PPC64_REL14 against `.text'+1ca4

So instead of doing a conditional branch outside of the feature
section, let's just branch to a label at the end of the same section,
making the branch very short.

Signed-off-by: Alexander Graf
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a7a20b85d8eb..7d1459e77de2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -413,10 +413,11 @@ FTR_SECTION_ELSE
         /* On P9 we use the split_info for coordinating LPCR changes */
         lwz     r4, KVM_SPLIT_DO_SET(r6)
         cmpwi   r4, 0
-        beq     63f
+        beq     1f
         mr      r3, r6
         bl      kvmhv_p9_set_lpcr
         nop
+1:
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 63:
         /* Order load of vcpu after load of vcore */
--
cgit

From 09f984961c137c4b252c368adab7e1c9f035fa59 Mon Sep 17 00:00:00 2001
From: Jose Ricardo Ziviani
Date: Sat, 3 Feb 2018 18:24:26 -0200
Subject: KVM: PPC: Book3S: Add MMIO emulation for VMX instructions

This patch provides the MMIO load/store vector indexed X-Form
emulation.

Instructions implemented:

lvx: the quadword in storage addressed by the result of
EA & 0xffff_ffff_ffff_fff0 is loaded into VRT.

stvx: the contents of VRS are stored into the quadword in storage
addressed by the result of EA & 0xffff_ffff_ffff_fff0.
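
Before the diff, a small stand-alone illustration may help: the
emulation never transfers the full 16 bytes in one go.
mmio_vmx_copy_nums is set to 2 when lvx/stvx is decoded, and each MMIO
pass moves one 8-byte half, so the doubleword index is
2 - mmio_vmx_copy_nums.  The sketch below is ordinary user-space C with
names local to the example, not code from the patch.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* the 16-byte quadword addressed by EA & 0xffff_ffff_ffff_fff0 */
            uint64_t quadword[2] = { 0x1111222233334444ULL, 0x5555666677778888ULL };
            unsigned int mmio_vmx_copy_nums = 2;    /* set when lvx/stvx is decoded */

            while (mmio_vmx_copy_nums) {
                    unsigned int di = 2 - mmio_vmx_copy_nums;  /* doubleword index: 0, then 1 */

                    /* each pass is emulated as one 8-byte MMIO access; paddr advances by 8 */
                    printf("pass %u: doubleword 0x%016" PRIx64 " at offset %u\n",
                           di, quadword[di], di * 8);

                    mmio_vmx_copy_nums--;
            }
            return 0;
    }
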
Reported-by: Gopesh Kumar Chaudhary
Reported-by: Balamuruhan S
Signed-off-by: Jose Ricardo Ziviani
Signed-off-by: Paul Mackerras
---
 arch/powerpc/include/asm/kvm_host.h   |   2 +
 arch/powerpc/include/asm/kvm_ppc.h    |   4 +
 arch/powerpc/include/asm/ppc-opcode.h |   6 ++
 arch/powerpc/kvm/emulate_loadstore.c  |  36 ++++++++
 arch/powerpc/kvm/powerpc.c            | 150 ++++++++++++++++++++++++++++++++++
 5 files changed, 198 insertions(+)

(limited to 'arch')

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fef8133becc8..1f53b562726f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -690,6 +690,7 @@ struct kvm_vcpu_arch {
         u8 mmio_vsx_offset;
         u8 mmio_vsx_copy_type;
         u8 mmio_vsx_tx_sx_enabled;
+        u8 mmio_vmx_copy_nums;
         u8 osi_needed;
         u8 osi_enabled;
         u8 papr_enabled;
@@ -804,6 +805,7 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_QPR        0x0040
 #define KVM_MMIO_REG_FQPR       0x0060
 #define KVM_MMIO_REG_VSX        0x0080
+#define KVM_MMIO_REG_VMX        0x00c0
 
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 941c2a3f231b..28c203003519 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                 unsigned int rt, unsigned int bytes,
                                 int is_default_endian, int mmio_sign_extend);
+extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
+                struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
+extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
+                struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                u64 val, unsigned int bytes,
                                int is_default_endian);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ce0930d68857..a51febca08c5 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -156,6 +156,12 @@
 #define OP_31_XOP_LFDX          599
 #define OP_31_XOP_LFDUX         631
 
+/* VMX Vector Load Instructions */
+#define OP_31_XOP_LVX           103
+
+/* VMX Vector Store Instructions */
+#define OP_31_XOP_STVX          231
+
 #define OP_LWZ  32
 #define OP_STFS 52
 #define OP_STFSU 53
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index af833531af31..a382e15135e6 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
+{
+        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
+                kvmppc_core_queue_vec_unavail(vcpu);
+                return true;
+        }
+
+        return false;
+}
+#endif /* CONFIG_ALTIVEC */
+
 /*
  * XXX to do:
  * lfiwax, lfiwzx
@@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
         vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
         vcpu->arch.mmio_sp64_extend = 0;
         vcpu->arch.mmio_sign_extend = 0;
+        vcpu->arch.mmio_vmx_copy_nums = 0;
 
         switch (get_op(inst)) {
         case 31:
@@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
                                                 rs, 4, 1);
                         break;
 #endif /* CONFIG_VSX */
+
+#ifdef CONFIG_ALTIVEC
+                case OP_31_XOP_LVX:
+                        if (kvmppc_check_altivec_disabled(vcpu))
+                                return EMULATE_DONE;
+                        vcpu->arch.vaddr_accessed &= ~0xFULL;
+                        vcpu->arch.paddr_accessed &= ~0xFULL;
+                        vcpu->arch.mmio_vmx_copy_nums = 2;
+                        emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+                                        KVM_MMIO_REG_VMX|rt, 1);
+                        break;
+
+                case OP_31_XOP_STVX:
+                        if (kvmppc_check_altivec_disabled(vcpu))
+                                return EMULATE_DONE;
+                        vcpu->arch.vaddr_accessed &= ~0xFULL;
+                        vcpu->arch.paddr_accessed &= ~0xFULL;
+                        vcpu->arch.mmio_vmx_copy_nums = 2;
+                        emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+                                        rs, 1);
+                        break;
+#endif /* CONFIG_ALTIVEC */
+
                 default:
                         emulated = EMULATE_FAIL;
                         break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index cf86aeb43fcf..47c7a302fd03 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -924,6 +924,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
+                u64 gpr)
+{
+        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+        u32 hi, lo;
+        u32 di;
+
+#ifdef __BIG_ENDIAN
+        hi = gpr >> 32;
+        lo = gpr & 0xffffffff;
+#else
+        lo = gpr >> 32;
+        hi = gpr & 0xffffffff;
+#endif
+
+        di = 2 - vcpu->arch.mmio_vmx_copy_nums;         /* doubleword index */
+        if (di > 1)
+                return;
+
+        if (vcpu->arch.mmio_host_swabbed)
+                di = 1 - di;
+
+        VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
+        VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+}
+#endif /* CONFIG_ALTIVEC */
+
 #ifdef CONFIG_PPC_FPU
 static inline u64 sp_to_dp(u32 fprs)
 {
@@ -1026,6 +1054,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                     KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
                         kvmppc_set_vsr_dword_dump(vcpu, gpr);
                 break;
+#endif
+#ifdef CONFIG_ALTIVEC
+        case KVM_MMIO_REG_VMX:
+                kvmppc_set_vmx_dword(vcpu, gpr);
+                break;
 #endif
         default:
                 BUG();
@@ -1302,6 +1335,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+/* handle quadword load access in two halves */
+int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                unsigned int rt, int is_default_endian)
+{
+        enum emulation_result emulated;
+
+        while (vcpu->arch.mmio_vmx_copy_nums) {
+                emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+                                is_default_endian, 0);
+
+                if (emulated != EMULATE_DONE)
+                        break;
+
+                vcpu->arch.paddr_accessed += run->mmio.len;
+                vcpu->arch.mmio_vmx_copy_nums--;
+        }
+
+        return emulated;
+}
+
+static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+        vector128 vrs = VCPU_VSX_VR(vcpu, rs);
+        u32 di;
+        u64 w0, w1;
+
+        di = 2 - vcpu->arch.mmio_vmx_copy_nums;         /* doubleword index */
+        if (di > 1)
+                return -1;
+
+        if (vcpu->arch.mmio_host_swabbed)
+                di = 1 - di;
+
+        w0 = vrs.u[di * 2];
+        w1 = vrs.u[di * 2 + 1];
+
+#ifdef __BIG_ENDIAN
+        *val = (w0 << 32) | w1;
+#else
+        *val = (w1 << 32) | w0;
+#endif
+        return 0;
+}
+
+/* handle quadword store in two halves */
+int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                unsigned int rs, int is_default_endian)
+{
+        u64 val = 0;
+        enum emulation_result emulated = EMULATE_DONE;
+
+        vcpu->arch.io_gpr = rs;
+
+        while (vcpu->arch.mmio_vmx_copy_nums) {
+                if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+                        return EMULATE_FAIL;
+
+                emulated = kvmppc_handle_store(run, vcpu, val, 8,
+                                is_default_endian);
+                if (emulated != EMULATE_DONE)
+                        break;
+
+                vcpu->arch.paddr_accessed += run->mmio.len;
+                vcpu->arch.mmio_vmx_copy_nums--;
+        }
+
+        return emulated;
+}
+
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
+                struct kvm_run *run)
+{
+        enum emulation_result emulated = EMULATE_FAIL;
+        int r;
+
+        vcpu->arch.paddr_accessed += run->mmio.len;
+
+        if (!vcpu->mmio_is_write) {
+                emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+                                vcpu->arch.io_gpr, 1);
+        } else {
+                emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+                                vcpu->arch.io_gpr, 1);
+        }
+
+        switch (emulated) {
+        case EMULATE_DO_MMIO:
+                run->exit_reason = KVM_EXIT_MMIO;
+                r = RESUME_HOST;
+                break;
+        case EMULATE_FAIL:
+                pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
+                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+                r = RESUME_HOST;
+                break;
+        default:
+                r = RESUME_GUEST;
+                break;
+        }
+
+        return r;
+}
+#endif /* CONFIG_ALTIVEC */
+
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
         int r = 0;
@@ -1420,6 +1558,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                                 return r;
                         }
                 }
+#endif
+#ifdef CONFIG_ALTIVEC
+                if (vcpu->arch.mmio_vmx_copy_nums > 0)
+                        vcpu->arch.mmio_vmx_copy_nums--;
+
+                if (vcpu->arch.mmio_vmx_copy_nums > 0) {
+                        r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+                        if (r == RESUME_HOST) {
+                                vcpu->mmio_needed = 1;
+                                return r;
+                        }
+                }
 #endif
         } else if (vcpu->arch.osi_needed) {
                 u64 *gprs = run->osi.gprs;
--
cgit
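
A closing note on the user-space side this last patch implies: a single
lvx or stvx that targets emulated MMIO now surfaces to the VMM as two
8-byte KVM_EXIT_MMIO exits, handled like any other MMIO access.  The
fragment below is a sketch under that assumption, using the standard
struct kvm_run MMIO fields; the device-memory indexing is deliberately
simplified and all names are local to the example.

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <string.h>

    /* Serve one KVM_EXIT_MMIO; called twice for an emulated lvx or stvx. */
    static void serve_mmio_exit(struct kvm_run *run, unsigned char *dev_mem,
                                uint64_t dev_base)
    {
            uint64_t off = run->mmio.phys_addr - dev_base;  /* run->mmio.len is 8 per half */

            if (run->mmio.is_write)
                    memcpy(dev_mem + off, run->mmio.data, run->mmio.len);
            else
                    memcpy(run->mmio.data, dev_mem + off, run->mmio.len);
    }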