Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_builtin.c')
 arch/powerpc/kvm/book3s_hv_builtin.c | 377 ++++--------------------------------
 1 file changed, 44 insertions(+), 333 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7cd3cf3d366b..fa0e3a22cac0 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -15,11 +15,11 @@
 #include <linux/cma.h>
 #include <linux/bitops.h>
 
-#include <asm/asm-prototypes.h>
 #include <asm/cputable.h>
+#include <asm/interrupt.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/archrandom.h>
+#include <asm/machdep.h>
 #include <asm/xics.h>
 #include <asm/xive.h>
 #include <asm/dbell.h>
@@ -32,21 +32,7 @@
 #include "book3s_xics.h"
 #include "book3s_xive.h"
 
-
-/*
- * The XIVE module will populate these when it loads
- */
-unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
-                       unsigned long mfrr);
-int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
-EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
+#include "book3s_hv.h"
 
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
@@ -95,25 +81,17 @@ EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
 void __init kvm_cma_reserve(void)
 {
         unsigned long align_size;
-        struct memblock_region *reg;
-        phys_addr_t selected_size = 0;
+        phys_addr_t selected_size;
 
         /*
          * We need CMA reservation only when we are in HV mode
          */
         if (!cpu_has_feature(CPU_FTR_HVMODE))
                 return;
 
-        /*
-         * We cannot use memblock_phys_mem_size() here, because
-         * memblock_analyze() has not been called yet.
-         */
-        for_each_memblock(memory, reg)
-                selected_size += memblock_region_memory_end_pfn(reg) -
-                                 memblock_region_memory_base_pfn(reg);
-
-        selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
+        selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
         if (selected_size) {
-                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+                pr_info("%s: reserving %ld MiB for global area\n", __func__,
                          (unsigned long)selected_size / SZ_1M);
                 align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                 cma_declare_contiguous(0, selected_size, 0, align_size,
@@ -159,23 +137,23 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
  * exist in the system. We use a counter of VMs to track this.
  *
  * One of the operations we need to block is onlining of secondaries, so we
- * protect hv_vm_count with get/put_online_cpus().
+ * protect hv_vm_count with cpus_read_lock/unlock().
  */
 static atomic_t hv_vm_count;
 
 void kvm_hv_vm_activated(void)
 {
-        get_online_cpus();
+        cpus_read_lock();
         atomic_inc(&hv_vm_count);
-        put_online_cpus();
+        cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);
 
 void kvm_hv_vm_deactivated(void)
 {
-        get_online_cpus();
+        cpus_read_lock();
         atomic_dec(&hv_vm_count);
-        put_online_cpus();
+        cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);
 
@@ -199,21 +177,19 @@ EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
 
 int kvmppc_hwrng_present(void)
 {
-        return powernv_hwrng_present();
+        return ppc_md.get_random_seed != NULL;
 }
 EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
 
-long kvmppc_h_random(struct kvm_vcpu *vcpu)
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
 {
-        int r;
+        unsigned long rand;
 
-        /* Only need to do the expensive mfmsr() on radix */
-        if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
-                r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
-        else
-                r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
-        if (r)
+        if (ppc_md.get_random_seed &&
+            ppc_md.get_random_seed(&rand)) {
+                kvmppc_set_gpr(vcpu, 4, rand);
                 return H_SUCCESS;
+        }
 
         return H_HARDWARE;
 }
@@ -228,15 +204,6 @@ void kvmhv_rm_send_ipi(int cpu)
         void __iomem *xics_phys;
         unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 
-        /* For a nested hypervisor, use the XICS via hcall */
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
-                                IPI_PRIORITY);
-                return;
-        }
-
         /* On POWER9 we can use msgsnd for any destination cpu. */
         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                 msg |= get_hard_smp_processor_id(cpu);
@@ -285,8 +252,7 @@ void kvmhv_commence_exit(int trap)
         struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
         int ptid = local_paca->kvm_hstate.ptid;
         struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
-        int me, ee, i, t;
-        int cpu0;
+        int me, ee, i;
 
         /* Set our bit in the threads-exiting-guest map in the 0xff00
            bits of vcore->entry_exit_map */
@@ -328,22 +294,6 @@ void kvmhv_commence_exit(int trap)
                 if ((ee >> 8) == 0)
                         kvmhv_interrupt_vcore(vc, ee);
         }
-
-        /*
-         * On POWER9 when running a HPT guest on a radix host (sip != NULL),
-         * we have to interrupt inactive CPU threads to get them to
-         * restore the host LPCR value.
-         */
-        if (sip->lpcr_req) {
-                if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
-                        vc = local_paca->kvm_hstate.kvm_vcore;
-                        cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
-                        for (t = 1; t < threads_per_core; ++t) {
-                                if (sip->napped[t])
-                                        kvmhv_rm_send_ipi(cpu0 + t);
-                        }
-                }
-        }
 }
 
 struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
@@ -461,24 +411,17 @@ static long kvmppc_read_one_intr(bool *again)
                 return 1;
 
         /* see if a host IPI is pending */
-        host_ipi = local_paca->kvm_hstate.host_ipi;
+        host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
         if (host_ipi)
                 return 1;
 
         /* Now read the interrupt from the ICP */
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
-                xirr = cpu_to_be32(retbuf[0]);
-        } else {
-                xics_phys = local_paca->kvm_hstate.xics_phys;
-                rc = 0;
-                if (!xics_phys)
-                        rc = opal_int_get_xirr(&xirr, false);
-                else
-                        xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
-        }
+        xics_phys = local_paca->kvm_hstate.xics_phys;
+        rc = 0;
+        if (!xics_phys)
+                rc = opal_int_get_xirr(&xirr, false);
+        else
+                xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
         if (rc < 0)
                 return 1;
 
@@ -507,13 +450,7 @@ static long kvmppc_read_one_intr(bool *again)
          */
         if (xisr == XICS_IPI) {
                 rc = 0;
-                if (kvmhv_on_pseries()) {
-                        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                        plpar_hcall_raw(H_IPI, retbuf,
-                                        hard_smp_processor_id(), 0xff);
-                        plpar_hcall_raw(H_EOI, retbuf, h_xirr);
-                } else if (xics_phys) {
+                if (xics_phys) {
                         __raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
                         __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
                 } else {
@@ -534,18 +471,12 @@ static long kvmppc_read_one_intr(bool *again)
                  * meantime. If it's clear, we bounce the interrupt to the
                  * guest
                  */
-                host_ipi = local_paca->kvm_hstate.host_ipi;
+                host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
                 if (unlikely(host_ipi != 0)) {
                         /* We raced with the host,
                          * we need to resend that IPI, bummer
                          */
-                        if (kvmhv_on_pseries()) {
-                                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                                plpar_hcall_raw(H_IPI, retbuf,
-                                                hard_smp_processor_id(),
-                                                IPI_PRIORITY);
-                        } else if (xics_phys)
+                        if (xics_phys)
                                 __raw_rm_writeb(IPI_PRIORITY,
                                                 xics_phys + XICS_MFRR);
                         else
@@ -564,197 +495,6 @@ static long kvmppc_read_one_intr(bool *again)
         return kvmppc_check_passthru(xisr, xirr, again);
 }
 
-#ifdef CONFIG_KVM_XICS
-static inline bool is_rm(void)
-{
-        return !(mfmsr() & MSR_DR);
-}
-
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
-{
-        if (!kvmppc_xics_enabled(vcpu))
-                return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_xirr(vcpu);
-                if (unlikely(!__xive_vm_h_xirr))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_xirr(vcpu);
-        } else
-                return xics_rm_h_xirr(vcpu);
-}
-
-unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
-{
-        if (!kvmppc_xics_enabled(vcpu))
-                return H_TOO_HARD;
-        vcpu->arch.regs.gpr[5] = get_tb();
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_xirr(vcpu);
-                if (unlikely(!__xive_vm_h_xirr))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_xirr(vcpu);
-        } else
-                return xics_rm_h_xirr(vcpu);
-}
-
-unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
-{
-        if (!kvmppc_xics_enabled(vcpu))
-                return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_ipoll(vcpu, server);
-                if (unlikely(!__xive_vm_h_ipoll))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_ipoll(vcpu, server);
-        } else
-                return H_TOO_HARD;
-}
-
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
-                    unsigned long mfrr)
-{
-        if (!kvmppc_xics_enabled(vcpu))
-                return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_ipi(vcpu, server, mfrr);
-                if (unlikely(!__xive_vm_h_ipi))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_ipi(vcpu, server, mfrr);
-        } else
-                return xics_rm_h_ipi(vcpu, server, mfrr);
-}
-
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
-{
-        if (!kvmppc_xics_enabled(vcpu))
-                return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_cppr(vcpu, cppr);
-                if (unlikely(!__xive_vm_h_cppr))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_cppr(vcpu, cppr);
-        } else
-                return xics_rm_h_cppr(vcpu, cppr);
-}
-
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
-{
-        if (!kvmppc_xics_enabled(vcpu))
-                return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_eoi(vcpu, xirr);
-                if (unlikely(!__xive_vm_h_eoi))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_eoi(vcpu, xirr);
-        } else
-                return xics_rm_h_eoi(vcpu, xirr);
-}
-#endif /* CONFIG_KVM_XICS */
-
-void kvmppc_bad_interrupt(struct pt_regs *regs)
-{
-        /*
-         * 100 could happen at any time, 200 can happen due to invalid real
-         * address access for example (or any time due to a hardware problem).
-         */
-        if (TRAP(regs) == 0x100) {
-                get_paca()->in_nmi++;
-                system_reset_exception(regs);
-                get_paca()->in_nmi--;
-        } else if (TRAP(regs) == 0x200) {
-                machine_check_exception(regs);
-        } else {
-                die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
-        }
-        panic("Bad KVM trap");
-}
-
-/*
- * Functions used to switch LPCR HR and UPRT bits on all threads
- * when entering and exiting HPT guests on a radix host.
- */
-
-#define PHASE_REALMODE          1       /* in real mode */
-#define PHASE_SET_LPCR          2       /* have set LPCR */
-#define PHASE_OUT_OF_GUEST      4       /* have finished executing in guest */
-#define PHASE_RESET_LPCR        8       /* have reset LPCR to host value */
-
-#define ALL(p)          (((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
-
-static void wait_for_sync(struct kvm_split_mode *sip, int phase)
-{
-        int thr = local_paca->kvm_hstate.tid;
-
-        sip->lpcr_sync.phase[thr] |= phase;
-        phase = ALL(phase);
-        while ((sip->lpcr_sync.allphases & phase) != phase) {
-                HMT_low();
-                barrier();
-        }
-        HMT_medium();
-}
-
-void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
-{
-        unsigned long rb, set;
-
-        /* wait for every other thread to get to real mode */
-        wait_for_sync(sip, PHASE_REALMODE);
-
-        /* Set LPCR and LPIDR */
-        mtspr(SPRN_LPCR, sip->lpcr_req);
-        mtspr(SPRN_LPID, sip->lpidr_req);
-        isync();
-
-        /* Invalidate the TLB on thread 0 */
-        if (local_paca->kvm_hstate.tid == 0) {
-                sip->do_set = 0;
-                asm volatile("ptesync" : : : "memory");
-                for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
-                        rb = TLBIEL_INVAL_SET_LPID +
-                                (set << TLBIEL_INVAL_SET_SHIFT);
-                        asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
-                                     "r" (rb), "r" (0));
-                }
-                asm volatile("ptesync" : : : "memory");
-        }
-
-        /* indicate that we have done so and wait for others */
-        wait_for_sync(sip, PHASE_SET_LPCR);
-        /* order read of sip->lpcr_sync.allphases vs. sip->do_set */
-        smp_rmb();
-}
-
-/*
- * Called when a thread that has been in the guest needs
- * to reload the host LPCR value - but only on POWER9 when
- * running a HPT guest on a radix host.
- */
-void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
-{
-        /* we're out of the guest... */
-        wait_for_sync(sip, PHASE_OUT_OF_GUEST);
-
-        mtspr(SPRN_LPID, 0);
-        mtspr(SPRN_LPCR, sip->host_lpcr);
-        isync();
-
-        if (local_paca->kvm_hstate.tid == 0) {
-                sip->do_restore = 0;
-                smp_wmb();      /* order store of do_restore vs. phase */
-        }
-
-        wait_for_sync(sip, PHASE_RESET_LPCR);
-        smp_mb();
-        local_paca->kvm_hstate.kvm_split_mode = NULL;
-}
-
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
 {
         vcpu->arch.ceded = 0;
@@ -766,13 +506,16 @@
 
 void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
+        /* Guest must always run with ME enabled, HV disabled. */
+        msr = (msr | MSR_ME) & ~MSR_HV;
+
         /*
         * Check for illegal transactional state bit combination
         * and if we find it, force the TS field to a safe state.
         */
        if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                msr &= ~MSR_TS_MASK;
-       vcpu->arch.shregs.msr = msr;
+       __kvmppc_set_msr_hv(vcpu, msr);
        kvmppc_end_cede(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
@@ -810,7 +553,7 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
        kvmppc_set_srr0(vcpu, pc);
        kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
        kvmppc_set_pc(vcpu, new_pc);
-       vcpu->arch.shregs.msr = new_msr;
+       __kvmppc_set_msr_hv(vcpu, new_msr);
 }
 
 void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
@@ -829,6 +572,8 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
        int ext;
        unsigned long lpcr;
 
+       WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
        /* Insert EXTERNAL bit into LPCR at the MER bit position */
        ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
        lpcr = mfspr(SPRN_LPCR);
@@ -862,57 +607,23 @@ static void flush_guest_tlb(struct kvm *kvm)
        unsigned long rb, set;
 
        rb = PPC_BIT(52);       /* IS = 2 */
-       if (kvm_is_radix(kvm)) {
-               /* R=1 PRS=1 RIC=2 */
+       for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+               /* R=0 PRS=0 RIC=0 */
                asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-                            : : "r" (rb), "i" (1), "i" (1), "i" (2),
+                            : : "r" (rb), "i" (0), "i" (0), "i" (0),
                             "r" (0) : "memory");
-               for (set = 1; set < kvm->arch.tlb_sets; ++set) {
-                       rb += PPC_BIT(51);      /* increment set number */
-                       /* R=1 PRS=1 RIC=0 */
-                       asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-                                    : : "r" (rb), "i" (1), "i" (1), "i" (0),
-                                    "r" (0) : "memory");
-               }
-               asm volatile("ptesync": : :"memory");
-               asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
-       } else {
-               for (set = 0; set < kvm->arch.tlb_sets; ++set) {
-                       /* R=0 PRS=0 RIC=0 */
-                       asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-                                    : : "r" (rb), "i" (0), "i" (0), "i" (0),
-                                    "r" (0) : "memory");
-                       rb += PPC_BIT(51);      /* increment set number */
-               }
-               asm volatile("ptesync": : :"memory");
-               asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+               rb += PPC_BIT(51);      /* increment set number */
        }
+       asm volatile("ptesync": : :"memory");
 }
 
-void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
-                                struct kvm_nested_guest *nested)
+void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
 {
-       cpumask_t *need_tlb_flush;
-
-       /*
-        * On POWER9, individual threads can come in here, but the
-        * TLB is shared between the 4 threads in a core, hence
-        * invalidating on one thread invalidates for all.
-        * Thus we make all 4 threads use the same bit.
-        */
-       if (cpu_has_feature(CPU_FTR_ARCH_300))
-               pcpu = cpu_first_thread_sibling(pcpu);
-
-       if (nested)
-               need_tlb_flush = &nested->need_tlb_flush;
-       else
-               need_tlb_flush = &kvm->arch.need_tlb_flush;
-
-       if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+       if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
                flush_guest_tlb(kvm);
 
                /* Clear the bit after the TLB flush */
-               cpumask_clear_cpu(pcpu, need_tlb_flush);
+               cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
        }
 }
 EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
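
Notes on selected hunks follow; none of the code below is part of the patch itself.

kvm_cma_reserve(): the open-coded walk over memblock regions is replaced by a single memblock_phys_mem_size() call, which is valid this early in boot on current kernels. As a worked example of the new arithmetic, assuming 64 GiB of RAM and the default kvm_cma_resv_ratio of 5 (percent), set elsewhere in this file:

        /*
         * Sketch of the arithmetic only:
         *   memblock_phys_mem_size()              = 64 GiB
         *   64 GiB * kvm_cma_resv_ratio(5) / 100  = 3.2 GiB
         *   PAGE_ALIGN(3.2 GiB)                   = rounded up to a page
         * cma_declare_contiguous() then reserves that region at
         * HPT_ALIGN_PAGES alignment for guest hash page tables.
         */
        selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);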
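kvmppc_rm_h_random(): the H_RANDOM handler no longer calls the PowerNV-specific powernv_get_random_real_mode(); it indirects through the platform's machdep vector, which is also what the new kvmppc_hwrng_present() tests. A platform advertises a hardware RNG by filling in ppc_md.get_random_seed. Below is a minimal sketch of such a hook for a DARN-capable CPU; the my_platform_* names are hypothetical, and the body mirrors the existing PowerNV DARN helper rather than anything added by this patch:

        #include <asm/machdep.h>
        #include <asm/ppc-opcode.h>

        /* Return 1 and store a random value on success, 0 on failure. */
        static int my_platform_get_random_seed(unsigned long *v)
        {
                unsigned long val;
                int i;

                for (i = 0; i < 10; i++) {
                        /* darn with L=1: 64-bit conditioned random number;
                         * the instruction returns all-ones on failure,
                         * so retry a few times before giving up. */
                        asm volatile(PPC_DARN(%0, 1) : "=r" (val));
                        if (val != ~0UL) {
                                *v = val;
                                return 1;
                        }
                }
                return 0;
        }

        /* ... in platform setup code: */
        ppc_md.get_random_seed = my_platform_get_random_seed;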
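kvmppc_read_one_intr(): both loads of kvm_hstate.host_ipi gain READ_ONCE(). The flag is written by other CPUs, and with a plain load the compiler may fuse the two checks into one load or reuse a stale register value; READ_ONCE() forces a fresh single load at each check. The generic pattern, illustrative rather than lifted from this file, pairs it with WRITE_ONCE() on the producer side:

        #include <linux/compiler.h>

        static int shared_flag;         /* hypothetical cross-CPU flag */

        void producer(void)             /* runs on another CPU */
        {
                WRITE_ONCE(shared_flag, 1);
        }

        int consumer(void)              /* each call performs a real load */
        {
                return READ_ONCE(shared_flag);
        }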
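kvmppc_set_msr_hv(): the guest-visible MSR is now sanitized so the guest always runs with machine check exceptions enabled and never in hypervisor state. A worked example of the masking, using the standard 64-bit MSR bit values (MSR_ME is bit 12, MSR_HV is bit 60); the sample value is illustrative:

        /*
         * msr = (msr | MSR_ME) & ~MSR_HV;
         *
         *   msr        = 0x9000000000001033   SF|HV|ME|IR|DR|RI|LE
         *   | MSR_ME   = 0x9000000000001033   (ME was already set)
         *   & ~MSR_HV  = 0x8000000000001033   (HV cleared: the guest
         *                                      may never run in
         *                                      hypervisor state)
         */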