Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	3686
1 file changed, 3026 insertions(+), 660 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 359c79cdf0cc..7667563fb9ff 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. @@ -12,13 +13,10 @@ * * This file is derived from arch/powerpc/kvm/book3s.c, * by Alexander Graf <agraf@suse.de>. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. */ #include <linux/kvm_host.h> +#include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/preempt.h> @@ -44,19 +42,26 @@ #include <linux/module.h> #include <linux/compiler.h> #include <linux/of.h> +#include <linux/irqdomain.h> +#include <linux/smp.h> +#include <asm/ftrace.h> #include <asm/reg.h> #include <asm/ppc-opcode.h> +#include <asm/asm-prototypes.h> +#include <asm/archrandom.h> +#include <asm/debug.h> #include <asm/disassemble.h> #include <asm/cputable.h> #include <asm/cacheflush.h> -#include <asm/tlbflush.h> #include <linux/uaccess.h> +#include <asm/interrupt.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/lppaca.h> +#include <asm/pmc.h> #include <asm/processor.h> #include <asm/cputhreads.h> #include <asm/page.h> @@ -70,8 +75,16 @@ #include <asm/opal.h> #include <asm/xics.h> #include <asm/xive.h> +#include <asm/hw_breakpoint.h> +#include <asm/kvm_book3s_uvmem.h> +#include <asm/ultravisor.h> +#include <asm/dtl.h> +#include <asm/plpar_wrappers.h> + +#include <trace/events/ipi.h> #include "book3s.h" +#include "book3s_hv.h" #define CREATE_TRACE_POINTS #include "trace_hv.h" @@ -91,30 +104,62 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); static int dynamic_mt_modes = 6; -module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR); +module_param(dynamic_mt_modes, int, 0644); MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); static int target_smt_mode; -module_param(target_smt_mode, int, S_IRUGO | S_IWUSR); +module_param(target_smt_mode, int, 0644); MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)"); +static bool one_vm_per_core; +module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)"); + #ifdef CONFIG_KVM_XICS -static struct kernel_param_ops module_param_ops = { +static const struct kernel_param_ops module_param_ops = { .set = param_set_int, .get = param_get_int, }; -module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, - S_IRUGO | S_IWUSR); +module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644); MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization"); -module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, - S_IRUGO | S_IWUSR); +module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644); MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); #endif -static void kvmppc_end_cede(struct kvm_vcpu *vcpu); +/* If set, guests are allowed to create and control nested guests */ +static bool nested = true; +module_param(nested, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(nested, "Enable nested 
virtualization (only on POWER9)"); + static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); +/* + * RWMR values for POWER8. These control the rate at which PURR + * and SPURR count and should be set according to the number of + * online threads in the vcore being run. + */ +#define RWMR_RPA_P8_1THREAD 0x164520C62609AECAUL +#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9UL +#define RWMR_RPA_P8_3THREAD 0x164520C62609AECAUL +#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9UL +#define RWMR_RPA_P8_5THREAD 0x164520C62609AECAUL +#define RWMR_RPA_P8_6THREAD 0x164520C62609AECAUL +#define RWMR_RPA_P8_7THREAD 0x164520C62609AECAUL +#define RWMR_RPA_P8_8THREAD 0x164520C62609AECAUL + +static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = { + RWMR_RPA_P8_1THREAD, + RWMR_RPA_P8_1THREAD, + RWMR_RPA_P8_2THREAD, + RWMR_RPA_P8_3THREAD, + RWMR_RPA_P8_4THREAD, + RWMR_RPA_P8_5THREAD, + RWMR_RPA_P8_6THREAD, + RWMR_RPA_P8_7THREAD, + RWMR_RPA_P8_8THREAD, +}; + static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, int *ip) { @@ -139,6 +184,10 @@ static bool kvmppc_ipi_thread(int cpu) { unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); + /* If we're a nested hypervisor, fall back to ordinary IPIs for now */ + if (kvmhv_on_pseries()) + return false; + /* On POWER9 we can use msgsnd to IPI any cpu */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { msg |= get_hard_smp_processor_id(cpu); @@ -163,7 +212,7 @@ static bool kvmppc_ipi_thread(int cpu) #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) if (cpu >= 0 && cpu < nr_cpu_ids) { - if (paca[cpu].kvm_hstate.xics_phys) { + if (paca_ptrs[cpu]->kvm_hstate.xics_phys) { xics_wake_cpu(cpu); return true; } @@ -178,13 +227,18 @@ static bool kvmppc_ipi_thread(int cpu) static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) { int cpu; - struct swait_queue_head *wqp; + struct rcuwait *waitp; - wqp = kvm_arch_vcpu_wq(vcpu); - if (swait_active(wqp)) { - swake_up(wqp); - ++vcpu->stat.halt_wakeup; - } + /* + * rcuwait_wake_up contains smp_mb() which orders prior stores that + * create pending work vs below loads of cpu fields. The other side + * is the barrier in vcpu run that orders setting the cpu fields vs + * testing for pending work. + */ + + waitp = kvm_arch_vcpu_get_wait(vcpu); + if (rcuwait_wake_up(waitp)) + ++vcpu->stat.generic.halt_wakeup; cpu = READ_ONCE(vcpu->arch.thread_cpu); if (cpu >= 0 && kvmppc_ipi_thread(cpu)) @@ -198,6 +252,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) /* * We use the vcpu_load/put functions to measure stolen time. + * * Stolen time is counted as time when either the vcpu is able to * run as part of a virtual core, but the task running the vcore * is preempted or sleeping, or when the vcpu needs something done @@ -227,24 +282,34 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) * lock. The stolen times are measured in units of timebase ticks. * (Note that the != TB_NIL checks below are purely defensive; * they should never fail.) + * + * The POWER9 path is simpler, one vcpu per virtual core so the + * former case does not exist. If a vcpu is preempted when it is + * BUSY_IN_HOST and not ceded or otherwise blocked, then accumulate + * the stolen cycles in busy_stolen. RUNNING is not a preemptible + * state in the P9 path. 
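/*
 * A minimal, self-contained model of the stolen-time bookkeeping described
 * in the comment above (the kernel functions follow below). The vcore_model
 * type is hypothetical; plain integers stand in for timebase reads and the
 * stoltb_lock.
 */
#include <stdint.h>
#include <stdio.h>

#define TB_NIL (~(uint64_t)0)

struct vcore_model {
	uint64_t preempt_tb;	/* start of current stolen interval, or TB_NIL */
	uint64_t stolen_tb;	/* accumulated stolen timebase ticks */
};

void start_stolen(struct vcore_model *vc, uint64_t tb)
{
	vc->preempt_tb = tb;
}

void end_stolen(struct vcore_model *vc, uint64_t tb)
{
	if (vc->preempt_tb != TB_NIL) {	/* defensive, like the != TB_NIL checks */
		vc->stolen_tb += tb - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
}

int main(void)
{
	struct vcore_model vc = { .preempt_tb = TB_NIL };

	start_stolen(&vc, 1000);	/* vcore runner preempted at tb = 1000 */
	end_stolen(&vc, 1500);		/* resumed at tb = 1500: 500 ticks stolen */
	printf("stolen ticks: %llu\n", (unsigned long long)vc.stolen_tb);
	return 0;
}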
*/ -static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc) +static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb) { unsigned long flags; + WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); + spin_lock_irqsave(&vc->stoltb_lock, flags); - vc->preempt_tb = mftb(); + vc->preempt_tb = tb; spin_unlock_irqrestore(&vc->stoltb_lock, flags); } -static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc) +static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb) { unsigned long flags; + WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); + spin_lock_irqsave(&vc->stoltb_lock, flags); if (vc->preempt_tb != TB_NIL) { - vc->stolen_tb += mftb() - vc->preempt_tb; + vc->stolen_tb += tb - vc->preempt_tb; vc->preempt_tb = TB_NIL; } spin_unlock_irqrestore(&vc->stoltb_lock, flags); @@ -254,6 +319,18 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long flags; + u64 now; + + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (vcpu->arch.busy_preempt != TB_NIL) { + WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); + vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; + vcpu->arch.busy_preempt = TB_NIL; + } + return; + } + + now = mftb(); /* * We can test vc->runner without taking the vcore lock, @@ -262,12 +339,12 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) * ever sets it to NULL. */ if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) - kvmppc_core_end_stolen(vc); + kvmppc_core_end_stolen(vc, now); spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && vcpu->arch.busy_preempt != TB_NIL) { - vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; + vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; vcpu->arch.busy_preempt = TB_NIL; } spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); @@ -277,43 +354,73 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long flags; + u64 now; + + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + /* + * In the P9 path, RUNNABLE is not preemptible + * (nor takes host interrupts) + */ + WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); + /* + * Account stolen time when preempted while the vcpu task is + * running in the kernel (but not in qemu, which is INACTIVE). + */ + if (task_is_running(current) && + vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) + vcpu->arch.busy_preempt = mftb(); + return; + } + + now = mftb(); if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) - kvmppc_core_start_stolen(vc); + kvmppc_core_start_stolen(vc, now); spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) - vcpu->arch.busy_preempt = mftb(); + vcpu->arch.busy_preempt = now; spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } -static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) -{ - /* - * Check for illegal transactional state bit combination - * and if we find it, force the TS field to a safe state. 
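/*
 * The removed lines just below contain the MSR[TS] sanity check this
 * comment describes. As a standalone sketch (the MSR_TS_MASK position here
 * is illustrative, not the architected one): both TS bits set is an illegal
 * transactional state, so it is forced back to zero.
 */
#include <stdint.h>

#define MSR_TS_MASK (3ULL << 33)	/* two-bit transactional-state field */

uint64_t sanitize_msr(uint64_t msr)
{
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)	/* 0b11 is reserved */
		msr &= ~MSR_TS_MASK;		/* force a safe, non-TM state */
	return msr;
}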
- */ - if ((msr & MSR_TS_MASK) == MSR_TS_MASK) - msr &= ~MSR_TS_MASK; - vcpu->arch.shregs.msr = msr; - kvmppc_end_cede(vcpu); -} - static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) { vcpu->arch.pvr = pvr; } /* Dummy value used in computing PCR value below */ -#define PCR_ARCH_300 (PCR_ARCH_207 << 1) +#define PCR_ARCH_31 (PCR_ARCH_300 << 1) + +static inline unsigned long map_pcr_to_cap(unsigned long pcr) +{ + unsigned long cap = 0; + + switch (pcr) { + case PCR_ARCH_300: + cap = H_GUEST_CAP_POWER9; + break; + case PCR_ARCH_31: + if (cpu_has_feature(CPU_FTR_P11_PVR)) + cap = H_GUEST_CAP_POWER11; + else + cap = H_GUEST_CAP_POWER10; + break; + default: + break; + } + + return cap; +} static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) { - unsigned long host_pcr_bit = 0, guest_pcr_bit = 0; + unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0; struct kvmppc_vcore *vc = vcpu->arch.vcore; /* We can (emulate) our own architecture version and anything older */ - if (cpu_has_feature(CPU_FTR_ARCH_300)) + if (cpu_has_feature(CPU_FTR_P11_PVR) || cpu_has_feature(CPU_FTR_ARCH_31)) + host_pcr_bit = PCR_ARCH_31; + else if (cpu_has_feature(CPU_FTR_ARCH_300)) host_pcr_bit = PCR_ARCH_300; else if (cpu_has_feature(CPU_FTR_ARCH_207S)) host_pcr_bit = PCR_ARCH_207; @@ -339,6 +446,10 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) case PVR_ARCH_300: guest_pcr_bit = PCR_ARCH_300; break; + case PVR_ARCH_31: + case PVR_ARCH_31_P11: + guest_pcr_bit = PCR_ARCH_31; + break; default: return -EINVAL; } @@ -348,10 +459,25 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) if (guest_pcr_bit > host_pcr_bit) return -EINVAL; + if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) { + /* + * 'arch_compat == 0' would mean the guest should default to + * L1's compatibility. In this case, the guest would pick + * host's PCR and evaluate the corresponding capabilities. 
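/*
 * The "host_pcr_bit - guest_pcr_bit" computation further below works
 * because each architecture level owns one bit and every level's bit is
 * twice the previous one, so the subtraction sets exactly the bits
 * guest_pcr_bit <= bit < host_pcr_bit. Worked example with illustrative
 * bit values (the real PCR_ARCH_* constants differ):
 */
#include <assert.h>

#define PCR_207 0x2UL	/* one bit per architecture level */
#define PCR_300 0x4UL
#define PCR_31  0x8UL

int main(void)
{
	/* v3.1 host, v3.00-compat guest: only the v3.00 bit is set */
	assert((PCR_31 - PCR_300) == PCR_300);
	/* v3.1 host, v2.07-compat guest: v2.07 and v3.00 bits are set */
	assert((PCR_31 - PCR_207) == (PCR_207 | PCR_300));
	return 0;
}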
+ */ + cap = map_pcr_to_cap(guest_pcr_bit); + if (!(cap & nested_capabilities)) + return -EINVAL; + } + spin_lock(&vc->lock); vc->arch_compat = arch_compat; - /* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */ - vc->pcr = host_pcr_bit - guest_pcr_bit; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR); + /* + * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit + * Also set all reserved PCR bits + */ + vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK; spin_unlock(&vc->lock); return 0; @@ -363,21 +489,21 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); pr_err("pc = %.16lx msr = %.16llx trap = %x\n", - vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); + vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); for (r = 0; r < 16; ++r) pr_err("r%2d = %.16lx r%d = %.16lx\n", r, kvmppc_get_gpr(vcpu, r), r+16, kvmppc_get_gpr(vcpu, r+16)); pr_err("ctr = %.16lx lr = %.16lx\n", - vcpu->arch.ctr, vcpu->arch.lr); + vcpu->arch.regs.ctr, vcpu->arch.regs.link); pr_err("srr0 = %.16llx srr1 = %.16llx\n", vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); - pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", - vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); + pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n", + vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); pr_err("fault dar = %.16lx dsisr = %.8x\n", vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); @@ -385,19 +511,14 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) for (r = 0; r < vcpu->arch.slb_max; ++r) pr_err(" ESID = %.16llx VSID = %.16llx\n", vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); - pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", + pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n", vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, vcpu->arch.last_inst); } static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) { - struct kvm_vcpu *ret; - - mutex_lock(&kvm->lock); - ret = kvm_get_vcpu_by_id(kvm, id); - mutex_unlock(&kvm->lock); - return ret; + return kvm_get_vcpu_by_id(kvm, id); } static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) @@ -485,6 +606,13 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, switch (subfunc) { case H_VPA_REG_VPA: /* register VPA */ + /* + * The size of our lppaca is 1kB because of the way we align + * it for the guest to avoid crossing a 4kB boundary. We only + * use 640 bytes of the structure though, so we should accept + * clients that set a size of 640. 
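/*
 * The BUILD_BUG_ON just below pins the structure size at compile time.
 * The same guard in standalone C11, with a placeholder for the real
 * 640-byte lppaca layout:
 */
#include <assert.h>
#include <stdint.h>

struct lppaca_model {
	uint8_t bytes[640];	/* stand-in for the actual field layout */
};

static_assert(sizeof(struct lppaca_model) == 640,
	      "H_VPA_REG_VPA length check assumes a 640-byte lppaca");

/* registration then reduces to: if (len < sizeof(struct lppaca_model)) fail; */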
+ */ + BUILD_BUG_ON(sizeof(struct lppaca) != 640); if (len < sizeof(struct lppaca)) break; vpap = &tvcpu->arch.vpa; @@ -548,7 +676,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, return err; } -static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) +static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap, + struct kvmppc_vpa *old_vpap) { struct kvm *kvm = vcpu->kvm; void *va; @@ -588,9 +717,8 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) kvmppc_unpin_guest_page(kvm, va, gpa, false); va = NULL; } - if (vpap->pinned_addr) - kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, - vpap->dirty); + *old_vpap = *vpap; + vpap->gpa = gpa; vpap->pinned_addr = va; vpap->dirty = false; @@ -600,6 +728,9 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; + struct kvmppc_vpa old_vpa = { 0 }; + if (!(vcpu->arch.vpa.update_pending || vcpu->arch.slb_shadow.update_pending || vcpu->arch.dtl.update_pending)) @@ -607,17 +738,34 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) spin_lock(&vcpu->arch.vpa_update_lock); if (vcpu->arch.vpa.update_pending) { - kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); - if (vcpu->arch.vpa.pinned_addr) + kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa); + if (old_vpa.pinned_addr) { + if (kvmhv_is_nestedv2()) + kvmhv_nestedv2_set_vpa(vcpu, ~0ull); + kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa, + old_vpa.dirty); + } + if (vcpu->arch.vpa.pinned_addr) { init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); + if (kvmhv_is_nestedv2()) + kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr)); + } } if (vcpu->arch.dtl.update_pending) { - kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); + kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa); + if (old_vpa.pinned_addr) + kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa, + old_vpa.dirty); vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; vcpu->arch.dtl_index = 0; } - if (vcpu->arch.slb_shadow.update_pending) - kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); + if (vcpu->arch.slb_shadow.update_pending) { + kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa); + if (old_vpa.pinned_addr) + kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa, + old_vpa.dirty); + } + spin_unlock(&vcpu->arch.vpa_update_lock); } @@ -630,6 +778,8 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) u64 p; unsigned long flags; + WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); + spin_lock_irqsave(&vc->stoltb_lock, flags); p = vc->stolen_tb; if (vc->vcore_state != VCORE_INACTIVE && @@ -639,19 +789,55 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) return p; } -static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, - struct kvmppc_vcore *vc) +static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, + struct lppaca *vpa, + unsigned int pcpu, u64 now, + unsigned long stolen) { struct dtl_entry *dt; + + dt = vcpu->arch.dtl_ptr; + + if (!dt) + return; + + dt->dispatch_reason = 7; + dt->preempt_reason = 0; + dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); + dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); + dt->ready_to_enqueue_time = 0; + dt->waiting_to_ready_time = 0; + dt->timebase = cpu_to_be64(now); + dt->fault_addr = 0; + dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); + dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); + + ++dt; + if (dt == vcpu->arch.dtl.pinned_end) + 
dt = vcpu->arch.dtl.pinned_addr; + vcpu->arch.dtl_ptr = dt; + /* order writing *dt vs. writing vpa->dtl_idx */ + smp_wmb(); + vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); + + /* vcpu->arch.dtl.dirty is set by the caller */ +} + +static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu, + struct kvmppc_vcore *vc) +{ struct lppaca *vpa; unsigned long stolen; unsigned long core_stolen; u64 now; unsigned long flags; - dt = vcpu->arch.dtl_ptr; vpa = vcpu->arch.vpa.pinned_addr; + if (!vpa) + return; + now = mftb(); + core_stolen = vcore_stolen_time(vc, now); stolen = core_stolen - vcpu->arch.stolen_logged; vcpu->arch.stolen_logged = core_stolen; @@ -659,23 +845,35 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, stolen += vcpu->arch.busy_stolen; vcpu->arch.busy_stolen = 0; spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); - if (!dt || !vpa) + + vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen); + + __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen); + + vcpu->arch.vpa.dirty = true; +} + +static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu, + struct kvmppc_vcore *vc, + u64 now) +{ + struct lppaca *vpa; + unsigned long stolen; + unsigned long stolen_delta; + + vpa = vcpu->arch.vpa.pinned_addr; + if (!vpa) return; - memset(dt, 0, sizeof(struct dtl_entry)); - dt->dispatch_reason = 7; - dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); - dt->timebase = cpu_to_be64(now + vc->tb_offset); - dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); - dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); - dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); - ++dt; - if (dt == vcpu->arch.dtl.pinned_end) - dt = vcpu->arch.dtl.pinned_addr; - vcpu->arch.dtl_ptr = dt; - /* order writing *dt vs. writing vpa->dtl_idx */ - smp_wmb(); - vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); - vcpu->arch.dtl.dirty = true; + + stolen = vc->stolen_tb; + stolen_delta = stolen - vcpu->arch.stolen_logged; + vcpu->arch.stolen_logged = stolen; + + vpa->enqueue_dispatch_tb = cpu_to_be64(stolen); + + __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta); + + vcpu->arch.vpa.dirty = true; } /* See if there is a doorbell interrupt pending for a vcpu */ @@ -686,11 +884,12 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) if (vcpu->arch.doorbell_request) return true; + if (cpu_has_feature(CPU_FTR_ARCH_300)) + return false; /* * Ensure that the read of vcore->dpdes comes after the read * of vcpu->doorbell_request. This barrier matches the - * lwsync in book3s_hv_rmhandlers.S just before the - * fast_guest_return label. + * smp_wmb() in kvmppc_guest_entry_inject(). 
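/*
 * A loose C11-atomics model of the barrier pairing described above: the
 * writer publishes dpdes before doorbell_request, and the reader (like the
 * smp_rmb() below) checks doorbell_request before dpdes. Field names are
 * abbreviated; this sketches the ordering only, not the kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

_Atomic uint64_t dpdes;
_Atomic bool doorbell_request;

void inject_doorbell(void)	/* writer side */
{
	atomic_store_explicit(&dpdes, 1, memory_order_relaxed);
	atomic_store_explicit(&doorbell_request, true, memory_order_release);
}

bool doorbell_pending(void)	/* reader side */
{
	if (atomic_load_explicit(&doorbell_request, memory_order_acquire))
		return true;
	return atomic_load_explicit(&dpdes, memory_order_relaxed) != 0;
}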
*/ smp_rmb(); vc = vcpu->arch.vcore; @@ -700,9 +899,9 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) { - if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) + if (kvmppc_get_arch_compat(vcpu) >= PVR_ARCH_207) return true; - if ((!vcpu->arch.vcore->arch_compat) && + if ((!kvmppc_get_arch_compat(vcpu)) && cpu_has_feature(CPU_FTR_ARCH_207S)) return true; return false; @@ -723,23 +922,124 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, /* Guests can't breakpoint the hypervisor */ if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) return H_P3; - vcpu->arch.ciabr = value1; + kvmppc_set_ciabr_hv(vcpu, value1); return H_SUCCESS; - case H_SET_MODE_RESOURCE_SET_DAWR: + case H_SET_MODE_RESOURCE_SET_DAWR0: if (!kvmppc_power8_compatible(vcpu)) return H_P2; + if (!ppc_breakpoint_available()) + return H_P2; + if (mflags) + return H_UNSUPPORTED_FLAG_START; + if (value2 & DABRX_HYP) + return H_P4; + kvmppc_set_dawr0_hv(vcpu, value1); + kvmppc_set_dawrx0_hv(vcpu, value2); + return H_SUCCESS; + case H_SET_MODE_RESOURCE_SET_DAWR1: + if (!kvmppc_power8_compatible(vcpu)) + return H_P2; + if (!ppc_breakpoint_available()) + return H_P2; + if (!cpu_has_feature(CPU_FTR_DAWR1)) + return H_P2; + if (!vcpu->kvm->arch.dawr1_enabled) + return H_FUNCTION; if (mflags) return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) return H_P4; - vcpu->arch.dawr = value1; - vcpu->arch.dawrx = value2; + kvmppc_set_dawr1_hv(vcpu, value1); + kvmppc_set_dawrx1_hv(vcpu, value2); return H_SUCCESS; + case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: + /* + * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved. + * Keep this in synch with kvmppc_filter_guest_lpcr_hv. + */ + if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) && + kvmhv_vcpu_is_radix(vcpu) && mflags == 3) + return H_UNSUPPORTED_FLAG_START; + return H_TOO_HARD; default: return H_TOO_HARD; } } +/* Copy guest memory in place - must reside within a single memslot */ +static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from, + unsigned long len) +{ + struct kvm_memory_slot *to_memslot = NULL; + struct kvm_memory_slot *from_memslot = NULL; + unsigned long to_addr, from_addr; + int r; + + /* Get HPA for from address */ + from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT); + if (!from_memslot) + return -EFAULT; + if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) + << PAGE_SHIFT)) + return -EINVAL; + from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT); + if (kvm_is_error_hva(from_addr)) + return -EFAULT; + from_addr |= (from & (PAGE_SIZE - 1)); + + /* Get HPA for to address */ + to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT); + if (!to_memslot) + return -EFAULT; + if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) + << PAGE_SHIFT)) + return -EINVAL; + to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT); + if (kvm_is_error_hva(to_addr)) + return -EFAULT; + to_addr |= (to & (PAGE_SIZE - 1)); + + /* Perform copy */ + r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr, + len); + if (r) + return -EFAULT; + mark_page_dirty(kvm, to >> PAGE_SHIFT); + return 0; +} + +static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long dest, unsigned long src) +{ + u64 pg_sz = SZ_4K; /* 4K page size */ + u64 pg_mask = SZ_4K - 1; + int ret; + + /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ + if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | + H_ZERO_PAGE 
| H_COPY_PAGE | H_PAGE_SET_LOANED)) + return H_PARAMETER; + + /* dest (and src if copy_page flag set) must be page aligned */ + if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) + return H_PARAMETER; + + /* zero and/or copy the page as determined by the flags */ + if (flags & H_COPY_PAGE) { + ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); + if (ret < 0) + return H_PARAMETER; + } else if (flags & H_ZERO_PAGE) { + ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); + if (ret < 0) + return H_PARAMETER; + } + + /* We can ignore the remaining flags */ + + return H_SUCCESS; +} + static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) { struct kvmppc_vcore *vcore = target->arch.vcore; @@ -750,14 +1050,19 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) * H_SUCCESS if the source vcore wasn't idle (e.g. if it may * have useful work to do and should not confer) so we don't * recheck that here. + * + * In the case of the P9 single vcpu per vcore case, the real + * mode handler is not called but no other threads are in the + * source vcore. */ - - spin_lock(&vcore->lock); - if (target->arch.state == KVMPPC_VCPU_RUNNABLE && - vcore->vcore_state != VCORE_INACTIVE && - vcore->runner) - target = vcore->runner; - spin_unlock(&vcore->lock); + if (!cpu_has_feature(CPU_FTR_ARCH_300)) { + spin_lock(&vcore->lock); + if (target->arch.state == KVMPPC_VCPU_RUNNABLE && + vcore->vcore_state != VCORE_INACTIVE && + vcore->runner) + target = vcore->runner; + spin_unlock(&vcore->lock); + } return kvm_vcpu_yield_to(target); } @@ -775,8 +1080,71 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) return yield_count; } +/* + * H_RPT_INVALIDATE hcall handler for nested guests. + * + * Handles only nested process-scoped invalidation requests in L0. + */ +static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu) +{ + unsigned long type = kvmppc_get_gpr(vcpu, 6); + unsigned long pid, pg_sizes, start, end; + + /* + * The partition-scoped invalidations aren't handled here in L0. + */ + if (type & H_RPTI_TYPE_NESTED) + return RESUME_HOST; + + pid = kvmppc_get_gpr(vcpu, 4); + pg_sizes = kvmppc_get_gpr(vcpu, 7); + start = kvmppc_get_gpr(vcpu, 8); + end = kvmppc_get_gpr(vcpu, 9); + + do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, + type, pg_sizes, start, end); + + kvmppc_set_gpr(vcpu, 3, H_SUCCESS); + return RESUME_GUEST; +} + +static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu, + unsigned long id, unsigned long target, + unsigned long type, unsigned long pg_sizes, + unsigned long start, unsigned long end) +{ + if (!kvm_is_radix(vcpu->kvm)) + return H_UNSUPPORTED; + + if (end < start) + return H_P5; + + /* + * Partition-scoped invalidation for nested guests. + */ + if (type & H_RPTI_TYPE_NESTED) { + if (!nesting_enabled(vcpu->kvm)) + return H_FUNCTION; + + /* Support only cores as target */ + if (target != H_RPTI_TARGET_CMMU) + return H_P2; + + return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes, + start, end); + } + + /* + * Process-scoped invalidation for L1 guests. 
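/*
 * The argument checks above, condensed into a standalone sketch (the
 * H_RPTI_TYPE_NESTED bit and H_* return codes are illustrative values,
 * not the PAPR-defined ones):
 */
#include <stdbool.h>

#define H_RPTI_TYPE_NESTED 0x8UL

enum rc { H_SUCCESS, H_P5, H_FUNCTION, H_UNSUPPORTED };

enum rc rpt_invalidate(bool radix, bool nested_enabled, unsigned long type,
		       unsigned long start, unsigned long end)
{
	if (!radix)
		return H_UNSUPPORTED;		/* hcall is radix-only */
	if (end < start)
		return H_P5;			/* malformed address range */
	if (type & H_RPTI_TYPE_NESTED)		/* partition-scoped request */
		return nested_enabled ? H_SUCCESS : H_FUNCTION;
	return H_SUCCESS;			/* process-scoped request */
}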
+ */ + do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, + type, pg_sizes, start, end); + return H_SUCCESS; +} + int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; unsigned long req = kvmppc_get_gpr(vcpu, 3); unsigned long target, ret = H_SUCCESS; int yield_count; @@ -788,17 +1156,63 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) return RESUME_HOST; switch (req) { + case H_REMOVE: + ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_ENTER: + ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6), + kvmppc_get_gpr(vcpu, 7)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_READ: + ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_CLEAR_MOD: + ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_CLEAR_REF: + ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_PROTECT: + ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_BULK_REMOVE: + ret = kvmppc_h_bulk_remove(vcpu); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_CEDE: break; case H_PROD: target = kvmppc_get_gpr(vcpu, 4); - tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); + tvcpu = kvmppc_find_vcpu(kvm, target); if (!tvcpu) { ret = H_PARAMETER; break; } tvcpu->arch.prodded = 1; - smp_mb(); + smp_mb(); /* This orders prodded store vs ceded load */ if (tvcpu->arch.ceded) kvmppc_fast_vcpu_kick_hv(tvcpu); break; @@ -806,7 +1220,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) target = kvmppc_get_gpr(vcpu, 4); if (target == -1) break; - tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); + tvcpu = kvmppc_find_vcpu(kvm, target); if (!tvcpu) { ret = H_PARAMETER; break; @@ -822,12 +1236,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) kvmppc_get_gpr(vcpu, 6)); break; case H_RTAS: - if (list_empty(&vcpu->kvm->arch.rtas_tokens)) + if (list_empty(&kvm->arch.rtas_tokens)) return RESUME_HOST; - idx = srcu_read_lock(&vcpu->kvm->srcu); + idx = srcu_read_lock(&kvm->srcu); rc = kvmppc_rtas_hcall(vcpu); - srcu_read_unlock(&vcpu->kvm->srcu, idx); + srcu_read_unlock(&kvm->srcu, idx); if (rc == -ENOENT) return RESUME_HOST; @@ -861,7 +1275,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) { - if (xive_enabled()) { + if (xics_on_xive()) { ret = H_NOT_AVAILABLE; return RESUME_GUEST; } @@ -869,6 +1283,20 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) break; } return RESUME_HOST; + case H_SET_DABR: + ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4)); + break; + case H_SET_XDABR: + ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + break; +#ifdef CONFIG_SPAPR_TCE_IOMMU + case H_GET_TCE: + ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; case H_PUT_TCE: ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), @@ -892,14 +1320,124 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (ret == 
H_TOO_HARD) return RESUME_HOST; break; +#endif + case H_RANDOM: { + unsigned long rand; + + if (!arch_get_random_seed_longs(&rand, 1)) + ret = H_HARDWARE; + kvmppc_set_gpr(vcpu, 4, rand); + break; + } + case H_RPT_INVALIDATE: + ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6), + kvmppc_get_gpr(vcpu, 7), + kvmppc_get_gpr(vcpu, 8), + kvmppc_get_gpr(vcpu, 9)); + break; + + case H_SET_PARTITION_TABLE: + ret = H_FUNCTION; + if (nesting_enabled(kvm)) + ret = kvmhv_set_partition_table(vcpu); + break; + case H_ENTER_NESTED: + ret = H_FUNCTION; + if (!nesting_enabled(kvm)) + break; + ret = kvmhv_enter_nested_guest(vcpu); + if (ret == H_INTERRUPT) { + kvmppc_set_gpr(vcpu, 3, 0); + vcpu->arch.hcall_needed = 0; + return -EINTR; + } else if (ret == H_TOO_HARD) { + kvmppc_set_gpr(vcpu, 3, 0); + vcpu->arch.hcall_needed = 0; + return RESUME_HOST; + } + break; + case H_TLB_INVALIDATE: + ret = H_FUNCTION; + if (nesting_enabled(kvm)) + ret = kvmhv_do_nested_tlbie(vcpu); + break; + case H_COPY_TOFROM_GUEST: + ret = H_FUNCTION; + if (nesting_enabled(kvm)) + ret = kvmhv_copy_tofrom_guest_nested(vcpu); + break; + case H_PAGE_INIT: + ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; + case H_SVM_PAGE_IN: + ret = H_UNSUPPORTED; + if (kvmppc_get_srr1(vcpu) & MSR_S) + ret = kvmppc_h_svm_page_in(kvm, + kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; + case H_SVM_PAGE_OUT: + ret = H_UNSUPPORTED; + if (kvmppc_get_srr1(vcpu) & MSR_S) + ret = kvmppc_h_svm_page_out(kvm, + kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; + case H_SVM_INIT_START: + ret = H_UNSUPPORTED; + if (kvmppc_get_srr1(vcpu) & MSR_S) + ret = kvmppc_h_svm_init_start(kvm); + break; + case H_SVM_INIT_DONE: + ret = H_UNSUPPORTED; + if (kvmppc_get_srr1(vcpu) & MSR_S) + ret = kvmppc_h_svm_init_done(kvm); + break; + case H_SVM_INIT_ABORT: + /* + * Even if that call is made by the Ultravisor, the SSR1 value + * is the guest context one, with the secure bit clear as it has + * not yet been secured. So we can't check it here. + * Instead the kvm->arch.secure_guest flag is checked inside + * kvmppc_h_svm_init_abort(). + */ + ret = kvmppc_h_svm_init_abort(kvm); + break; + default: return RESUME_HOST; } + WARN_ON_ONCE(ret == H_TOO_HARD); kvmppc_set_gpr(vcpu, 3, ret); vcpu->arch.hcall_needed = 0; return RESUME_GUEST; } +/* + * Handle H_CEDE in the P9 path where we don't call the real-mode hcall + * handlers in book3s_hv_rmhandlers.S. + * + * This has to be done early, not in kvmppc_pseries_do_hcall(), so + * that the cede logic in kvmppc_run_single_vcpu() works properly. 
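/*
 * The cede/prod handshake implemented below relies on symmetric full
 * barriers: the ceding vcpu orders "ceded = 1" before its read of prodded,
 * while H_PROD orders "prodded = 1" before its read of ceded, so at least
 * one side always sees the other. Standalone model with C11 seq_cst
 * atomics standing in for smp_mb():
 */
#include <stdatomic.h>
#include <stdbool.h>

_Atomic bool ceded, prodded;

void wake_vcpu(void) { /* stand-in for kvmppc_fast_vcpu_kick_hv() */ }

void vcpu_cede(void)			/* target vcpu executing H_CEDE */
{
	atomic_store(&ceded, true);
	if (atomic_load(&prodded)) {
		atomic_store(&prodded, false);
		atomic_store(&ceded, false);	/* work arrived, don't sleep */
	}
}

void vcpu_prod(void)			/* another vcpu executing H_PROD */
{
	atomic_store(&prodded, true);
	if (atomic_load(&ceded))
		wake_vcpu();
}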
+ */ +static void kvmppc_cede(struct kvm_vcpu *vcpu) +{ + __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE); + vcpu->arch.ceded = 1; + smp_mb(); + if (vcpu->arch.prodded) { + vcpu->arch.prodded = 0; + smp_mb(); + vcpu->arch.ceded = 0; + } +} + static int kvmppc_hcall_impl_hv(unsigned long cmd) { switch (cmd) { @@ -908,6 +1446,12 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) case H_CONFER: case H_REGISTER_VPA: case H_SET_MODE: +#ifdef CONFIG_SPAPR_TCE_IOMMU + case H_GET_TCE: + case H_PUT_TCE: + case H_PUT_TCE_INDIRECT: + case H_STUFF_TCE: +#endif case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_STORE: #ifdef CONFIG_KVM_XICS @@ -918,6 +1462,8 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) case H_IPOLL: case H_XIRR_X: #endif + case H_PAGE_INIT: + case H_RPT_INVALIDATE: return 1; } @@ -925,10 +1471,9 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) return kvmppc_hcall_impl_hv_realmode(cmd); } -static int kvmppc_emulate_debug_inst(struct kvm_run *run, - struct kvm_vcpu *vcpu) +static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) { - u32 last_inst; + ppc_inst_t last_inst; if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != EMULATE_DONE) { @@ -939,12 +1484,13 @@ static int kvmppc_emulate_debug_inst(struct kvm_run *run, return RESUME_GUEST; } - if (last_inst == KVMPPC_INST_SW_BREAKPOINT) { - run->exit_reason = KVM_EXIT_DEBUG; - run->debug.arch.address = kvmppc_get_pc(vcpu); + if (ppc_inst_val(last_inst) == KVMPPC_INST_SW_BREAKPOINT) { + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); return RESUME_HOST; } else { - kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL | + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); return RESUME_GUEST; } } @@ -992,11 +1538,11 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) unsigned long arg; struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tvcpu; + ppc_inst_t pinst; - if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return EMULATE_FAIL; - if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) + if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst) != EMULATE_DONE) return RESUME_GUEST; + inst = ppc_inst_val(pinst); if (get_op(inst) != 31) return EMULATE_FAIL; rb = get_rb(inst); @@ -1004,9 +1550,9 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) switch (get_xop(inst)) { case OP_31_XOP_MSGSNDP: arg = kvmppc_get_gpr(vcpu, rb); - if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER) + if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) break; - arg &= 0x3f; + arg &= 0x7f; if (arg >= kvm->arch.emul_smt_mode) break; tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); @@ -1019,7 +1565,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) break; case OP_31_XOP_MSGCLRP: arg = kvmppc_get_gpr(vcpu, rb); - if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER) + if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) break; vcpu->arch.vcore->dpdes = 0; vcpu->arch.doorbell_request = 0; @@ -1044,9 +1590,47 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, +/* + * If the lppaca had pmcregs_in_use clear when we exited the guest, then + * HFSCR_PM is cleared for next entry. If the guest then tries to access + * the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM + * back in the guest HFSCR will cause the next entry to load the PMU SPRs and + * allow the guest access to continue. 
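/*
 * This is lazy facility enablement: the HFSCR bit is cleared on exit and
 * only restored when the guest actually faults on the facility, as the
 * handlers below do for PM, EBB and TM. Minimal model (bit position and
 * types are illustrative):
 */
#include <stdint.h>

#define HFSCR_PM (1ULL << 60)

struct hfscr_state {
	uint64_t hfscr;			/* currently granted facilities */
	uint64_t hfscr_permitted;	/* facilities the host allows at all */
};

/* returns 0 to retry the guest access, -1 to reflect an illegal-op fault */
int pmu_unavailable(struct hfscr_state *s)
{
	if (!(s->hfscr_permitted & HFSCR_PM))
		return -1;
	s->hfscr |= HFSCR_PM;		/* next entry reloads the PMU SPRs */
	return 0;
}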
+ */ +static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) + return EMULATE_FAIL; + + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM); + + return RESUME_GUEST; +} + +static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) + return EMULATE_FAIL; + + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB); + + return RESUME_GUEST; +} + +static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) + return EMULATE_FAIL; + + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM); + + return RESUME_GUEST; +} + +static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, struct task_struct *tsk) { + struct kvm_run *run = vcpu->run; int r = RESUME_HOST; vcpu->stat.sum_exits++; @@ -1059,7 +1643,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, * That can happen due to a bug, or due to a machine check * occurring at just the wrong time. */ - if (vcpu->arch.shregs.msr & MSR_HV) { + if (!kvmhv_is_nestedv2() && (__kvmppc_get_msr_hv(vcpu) & MSR_HV)) { printk(KERN_EMERG "KVM trap in HV mode!\n"); printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", vcpu->arch.trap, kvmppc_get_pc(vcpu), @@ -1073,6 +1657,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, run->ready_for_interrupt_injection = 1; switch (vcpu->arch.trap) { /* We're good on these - the host merely wanted to get our attention */ + case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER: + WARN_ON_ONCE(1); /* Should never happen */ + vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; + fallthrough; case BOOK3S_INTERRUPT_HV_DECREMENTER: vcpu->stat.dec_exits++; r = RESUME_GUEST; @@ -1083,12 +1671,36 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; - /* HMI is hypervisor interrupt and host has handled it. Resume guest.*/ + /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ case BOOK3S_INTERRUPT_HMI: case BOOK3S_INTERRUPT_PERFMON: + case BOOK3S_INTERRUPT_SYSTEM_RESET: r = RESUME_GUEST; break; - case BOOK3S_INTERRUPT_MACHINE_CHECK: + case BOOK3S_INTERRUPT_MACHINE_CHECK: { + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + /* + * Print the MCE event to host console. Ratelimit so the guest + * can't flood the host log. + */ + if (__ratelimit(&rs)) + machine_check_print_event_info(&vcpu->arch.mce_evt,false, true); + + /* + * If the guest can do FWNMI, exit to userspace so it can + * deliver a FWNMI to the guest. + * Otherwise we synthesize a machine check for the guest + * so that it knows that the machine check occurred. + */ + if (!vcpu->kvm->arch.fwnmi_enabled) { + ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) | + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); + kvmppc_core_queue_machine_check(vcpu, flags); + r = RESUME_GUEST; + break; + } + /* Exit to guest with KVM_EXIT_NMI as exit reason */ run->exit_reason = KVM_EXIT_NMI; run->hw.hardware_exit_reason = vcpu->arch.trap; @@ -1101,9 +1713,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; r = RESUME_HOST; - /* Print the MCE event to host console. 
*/ - machine_check_print_event_info(&vcpu->arch.mce_evt, false); break; + } case BOOK3S_INTERRUPT_PROGRAM: { ulong flags; @@ -1113,20 +1724,47 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, * as a result of a hypervisor emulation interrupt * (e40) getting turned into a 700 by BML RTAS. */ - flags = vcpu->arch.shregs.msr & 0x1f0000ull; + flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) | + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; } case BOOK3S_INTERRUPT_SYSCALL: { - /* hcall - punt to userspace */ int i; - /* hypercall with MSR_PR has already been handled in rmode, - * and never reaches here. - */ + if (!kvmhv_is_nestedv2() && unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { + /* + * Guest userspace executed sc 1. This can only be + * reached by the P9 path because the old path + * handles this case in realmode hcall handlers. + */ + if (!kvmhv_vcpu_is_radix(vcpu)) { + /* + * A guest could be running PR KVM, so this + * may be a PR KVM hcall. It must be reflected + * to the guest kernel as a sc interrupt. + */ + kvmppc_core_queue_syscall(vcpu); + } else { + /* + * Radix guests can not run PR KVM or nested HV + * hash guests which might run PR KVM, so this + * is always a privilege fault. Send a program + * check to guest kernel. + */ + kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); + } + r = RESUME_GUEST; + break; + } + /* + * hcall - gather args and set exit_reason. This will next be + * handled by kvmppc_pseries_do_hcall which may be able to deal + * with it and resume guest, or may punt to userspace. + */ run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); for (i = 0; i < 9; ++i) run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); @@ -1139,17 +1777,107 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, * We get these next two if the guest accesses a page which it thinks * it has mapped but which is not actually present, either because * it is for an emulated I/O device or because the corresonding - * host page has been paged out. Any other HDSI/HISI interrupts - * have been handled already. + * host page has been paged out. + * + * Any other HDSI/HISI interrupts have been handled already for P7/8 + * guests. For POWER9 hash guests not using rmhandlers, basic hash + * fault handling is done here. */ - case BOOK3S_INTERRUPT_H_DATA_STORAGE: - r = RESUME_PAGE_FAULT; + case BOOK3S_INTERRUPT_H_DATA_STORAGE: { + unsigned long vsid; + long err; + + if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) && + unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { + r = RESUME_GUEST; /* Just retry if it's the canary */ + break; + } + + if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { + /* + * Radix doesn't require anything, and pre-ISAv3.0 hash + * already attempted to handle this in rmhandlers. The + * hash fault handling below is v3 only (it uses ASDR + * via fault_gpa). 
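/*
 * The hash-fault plumbing below maps kvmppc_hpte_hv_fault() results onto
 * resume actions. Condensed sketch (enum values are stand-ins for the
 * kernel's RESUME_* constants):
 */
enum resume { RESUME_GUEST, RESUME_PAGE_FAULT, REFLECT_STORAGE_INTR };

enum resume map_hpte_fault(long err)
{
	if (err == 0)
		return RESUME_GUEST;		/* HPTE fixed up, just retry */
	if (err == -1 || err == -2)
		return RESUME_PAGE_FAULT;	/* take the full fault path */
	return REFLECT_STORAGE_INTR;		/* queue DSI/ISI to the guest */
}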
+ */ + r = RESUME_PAGE_FAULT; + break; + } + + if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { + kvmppc_core_queue_data_storage(vcpu, + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, + vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); + r = RESUME_GUEST; + break; + } + + if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR)) + vsid = vcpu->kvm->arch.vrma_slb_v; + else + vsid = vcpu->arch.fault_gpa; + + err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, + vsid, vcpu->arch.fault_dsisr, true); + if (err == 0) { + r = RESUME_GUEST; + } else if (err == -1 || err == -2) { + r = RESUME_PAGE_FAULT; + } else { + kvmppc_core_queue_data_storage(vcpu, + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, + vcpu->arch.fault_dar, err); + r = RESUME_GUEST; + } break; - case BOOK3S_INTERRUPT_H_INST_STORAGE: + } + case BOOK3S_INTERRUPT_H_INST_STORAGE: { + unsigned long vsid; + long err; + vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); - vcpu->arch.fault_dsisr = 0; - r = RESUME_PAGE_FAULT; + vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) & + DSISR_SRR1_MATCH_64S; + if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { + /* + * Radix doesn't require anything, and pre-ISAv3.0 hash + * already attempted to handle this in rmhandlers. The + * hash fault handling below is v3 only (it uses ASDR + * via fault_gpa). + */ + if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE) + vcpu->arch.fault_dsisr |= DSISR_ISSTORE; + r = RESUME_PAGE_FAULT; + break; + } + + if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { + kvmppc_core_queue_inst_storage(vcpu, + vcpu->arch.fault_dsisr | + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); + r = RESUME_GUEST; + break; + } + + if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR)) + vsid = vcpu->kvm->arch.vrma_slb_v; + else + vsid = vcpu->arch.fault_gpa; + + err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, + vsid, vcpu->arch.fault_dsisr, false); + if (err == 0) { + r = RESUME_GUEST; + } else if (err == -1) { + r = RESUME_PAGE_FAULT; + } else { + kvmppc_core_queue_inst_storage(vcpu, + err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); + r = RESUME_GUEST; + } break; + } + /* * This occurs if the guest executes an illegal instruction. * If the guest debug is disabled, generate a program interrupt @@ -1163,12 +1891,28 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { - r = kvmppc_emulate_debug_inst(run, vcpu); + r = kvmppc_emulate_debug_inst(vcpu); } else { - kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL | + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); r = RESUME_GUEST; } break; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case BOOK3S_INTERRUPT_HV_SOFTPATCH: + /* + * This occurs for various TM-related instructions that + * we need to emulate on POWER9 DD2.2. We have already + * handled the cases where the guest was in real-suspend + * mode and was transitioning to transactional state. + */ + r = kvmhv_p9_tm_emulation(vcpu); + if (r != -1) + break; + fallthrough; /* go to facility unavailable handler */ +#endif + /* * This occurs if the guest (kernel or userspace), does something that * is prohibited by HFSCR. @@ -1176,15 +1920,36 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, * to emulate. * Otherwise, we just generate a program interrupt to the guest. 
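/*
 * The switch below keys off the top byte of HFSCR (the interrupt-cause
 * field, HFSCR >> 56) to pick a handler. Decode sketch; the FSCR_*_LG
 * values here are illustrative, not the architected assignments:
 */
#include <stdint.h>

#define FSCR_MSGP_LG 10
#define FSCR_PM_LG    9
#define FSCR_EBB_LG   7
#define FSCR_TM_LG    5

const char *fac_unavail_cause(uint64_t hfscr)
{
	switch (hfscr >> 56) {
	case FSCR_MSGP_LG: return "doorbell (msgsndp) emulation";
	case FSCR_PM_LG:   return "PMU facility";
	case FSCR_EBB_LG:  return "EBB facility";
	case FSCR_TM_LG:   return "TM facility";
	default:           return "unhandled: program check to guest";
	}
}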
*/ - case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: + case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: { + u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56; + r = EMULATE_FAIL; - if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) - r = kvmppc_emulate_doorbell_instr(vcpu); + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + switch (cause) { + case FSCR_MSGP_LG: + r = kvmppc_emulate_doorbell_instr(vcpu); + break; + case FSCR_PM_LG: + r = kvmppc_pmu_unavailable(vcpu); + break; + case FSCR_EBB_LG: + r = kvmppc_ebb_unavailable(vcpu); + break; + case FSCR_TM_LG: + r = kvmppc_tm_unavailable(vcpu); + break; + default: + break; + } + } if (r == EMULATE_FAIL) { - kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL | + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); r = RESUME_GUEST; } break; + } + case BOOK3S_INTERRUPT_HV_RM_HARD: r = RESUME_PASSTHROUGH; break; @@ -1192,7 +1957,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_dump_regs(vcpu); printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", vcpu->arch.trap, kvmppc_get_pc(vcpu), - vcpu->arch.shregs.msr); + __kvmppc_get_msr_hv(vcpu)); run->hw.hardware_exit_reason = vcpu->arch.trap; r = RESUME_HOST; break; @@ -1201,6 +1966,138 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, return r; } +static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) +{ + int r; + int srcu_idx; + + vcpu->stat.sum_exits++; + + /* + * This can happen if an interrupt occurs in the last stages + * of guest entry or the first stages of guest exit (i.e. after + * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV + * and before setting it to KVM_GUEST_MODE_HOST_HV). + * That can happen due to a bug, or due to a machine check + * occurring at just the wrong time. + */ + if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) { + pr_emerg("KVM trap in HV mode while nested!\n"); + pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n", + vcpu->arch.trap, kvmppc_get_pc(vcpu), + __kvmppc_get_msr_hv(vcpu)); + kvmppc_dump_regs(vcpu); + return RESUME_HOST; + } + switch (vcpu->arch.trap) { + /* We're good on these - the host merely wanted to get our attention */ + case BOOK3S_INTERRUPT_HV_DECREMENTER: + vcpu->stat.dec_exits++; + r = RESUME_GUEST; + break; + case BOOK3S_INTERRUPT_EXTERNAL: + vcpu->stat.ext_intr_exits++; + r = RESUME_HOST; + break; + case BOOK3S_INTERRUPT_H_DOORBELL: + case BOOK3S_INTERRUPT_H_VIRT: + vcpu->stat.ext_intr_exits++; + r = RESUME_GUEST; + break; + /* These need to go to the nested HV */ + case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER: + vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; + vcpu->stat.dec_exits++; + r = RESUME_HOST; + break; + /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ + case BOOK3S_INTERRUPT_HMI: + case BOOK3S_INTERRUPT_PERFMON: + case BOOK3S_INTERRUPT_SYSTEM_RESET: + r = RESUME_GUEST; + break; + case BOOK3S_INTERRUPT_MACHINE_CHECK: + { + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + /* Pass the machine check to the L1 guest */ + r = RESUME_HOST; + /* Print the MCE event to host console. */ + if (__ratelimit(&rs)) + machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); + break; + } + /* + * We get these next two if the guest accesses a page which it thinks + * it has mapped but which is not actually present, either because + * it is for an emulated I/O device or because the corresonding + * host page has been paged out. 
+ */ + case BOOK3S_INTERRUPT_H_DATA_STORAGE: + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvmhv_nested_page_fault(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + break; + case BOOK3S_INTERRUPT_H_INST_STORAGE: + vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); + vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & + DSISR_SRR1_MATCH_64S; + if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE) + vcpu->arch.fault_dsisr |= DSISR_ISSTORE; + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvmhv_nested_page_fault(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + break; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case BOOK3S_INTERRUPT_HV_SOFTPATCH: + /* + * This occurs for various TM-related instructions that + * we need to emulate on POWER9 DD2.2. We have already + * handled the cases where the guest was in real-suspend + * mode and was transitioning to transactional state. + */ + r = kvmhv_p9_tm_emulation(vcpu); + if (r != -1) + break; + fallthrough; /* go to facility unavailable handler */ +#endif + + case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: + r = RESUME_HOST; + break; + + case BOOK3S_INTERRUPT_HV_RM_HARD: + vcpu->arch.trap = 0; + r = RESUME_GUEST; + if (!xics_on_xive()) + kvmppc_xics_rm_complete(vcpu, 0); + break; + case BOOK3S_INTERRUPT_SYSCALL: + { + unsigned long req = kvmppc_get_gpr(vcpu, 3); + + /* + * The H_RPT_INVALIDATE hcalls issued by nested + * guests for process-scoped invalidations when + * GTSE=0, are handled here in L0. + */ + if (req == H_RPT_INVALIDATE) { + r = kvmppc_nested_h_rpt_invalidate(vcpu); + break; + } + + r = RESUME_HOST; + break; + } + default: + r = RESUME_HOST; + break; + } + + return r; +} + static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { @@ -1238,6 +2135,49 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, return 0; } +/* + * Enforce limits on guest LPCR values based on hardware availability, + * guest configuration, and possibly hypervisor support and security + * concerns. + */ +unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) +{ + /* LPCR_TC only applies to HPT guests */ + if (kvm_is_radix(kvm)) + lpcr &= ~LPCR_TC; + + /* On POWER8 and above, userspace can modify AIL */ + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + lpcr &= ~LPCR_AIL; + if ((lpcr & LPCR_AIL) != LPCR_AIL_3) + lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ + /* + * On some POWER9s we force AIL off for radix guests to prevent + * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to + * guest, which can result in Q0 translations with LPID=0 PID=PIDR to + * be cached, which the host TLB management does not expect. + */ + if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) + lpcr &= ~LPCR_AIL; + + /* + * On POWER9, allow userspace to enable large decrementer for the + * guest, whether or not the host has it enabled. 
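/*
 * kvmppc_filter_lpcr_hv() above strips LPCR bits the current configuration
 * cannot honour. The same decision tree as a standalone sketch (bit
 * positions illustrative):
 */
#include <stdbool.h>
#include <stdint.h>

#define LPCR_TC    (1UL << 0)
#define LPCR_AIL   (3UL << 1)	/* two-bit field */
#define LPCR_AIL_3 (3UL << 1)	/* only AIL=0 or AIL=3 are supported */
#define LPCR_LD    (1UL << 3)

uint64_t filter_lpcr(uint64_t lpcr, bool radix, bool has_207s,
		     bool has_300, bool prefetch_bug)
{
	if (radix)
		lpcr &= ~LPCR_TC;	/* TC applies to HPT guests only */
	if (!has_207s || (lpcr & LPCR_AIL) != LPCR_AIL_3)
		lpcr &= ~LPCR_AIL;	/* AIL=1/2 disallowed; pre-P8: none */
	if (radix && prefetch_bug)
		lpcr &= ~LPCR_AIL;	/* POWER9 prefetch-bug workaround */
	if (!has_300)
		lpcr &= ~LPCR_LD;	/* large decrementer is ISA v3.0+ */
	return lpcr;
}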
+ */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + lpcr &= ~LPCR_LD; + + return lpcr; +} + +static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) +{ + if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { + WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", + lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); + } +} + static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, bool preserve_top32) { @@ -1245,15 +2185,31 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 mask; - mutex_lock(&kvm->lock); spin_lock(&vc->lock); + + /* + * Userspace can only modify + * DPFD (default prefetch depth), ILE (interrupt little-endian), + * TC (translation control), AIL (alternate interrupt location), + * LD (large decrementer). + * These are subject to restrictions from kvmppc_filter_lcpr_hv(). + */ + mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD; + + /* Broken 32-bit version of LPCR must not clear top bits */ + if (preserve_top32) + mask &= 0xFFFFFFFF; + + new_lpcr = kvmppc_filter_lpcr_hv(kvm, + (vc->lpcr & ~mask) | (new_lpcr & mask)); + /* * If ILE (interrupt little-endian) has changed, update the * MSR_LE bit in the intr_msr for each vcpu in this vcore. */ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.vcore != vc) @@ -1265,27 +2221,10 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, } } - /* - * Userspace can only modify DPFD (default prefetch depth), - * ILE (interrupt little-endian) and TC (translation control). - * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.). - */ - mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; - if (cpu_has_feature(CPU_FTR_ARCH_207S)) - mask |= LPCR_AIL; - /* - * On POWER9, allow userspace to enable large decrementer for the - * guest, whether or not the host has it enabled. - */ - if (cpu_has_feature(CPU_FTR_ARCH_300)) - mask |= LPCR_LD; + vc->lpcr = new_lpcr; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR); - /* Broken 32-bit version of LPCR must not clear top bits */ - if (preserve_top32) - mask &= 0xFFFFFFFF; - vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); spin_unlock(&vc->lock); - mutex_unlock(&kvm->lock); } static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, @@ -1308,61 +2247,103 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.dabrx); break; case KVM_REG_PPC_DSCR: - *val = get_reg_val(id, vcpu->arch.dscr); + *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu)); break; case KVM_REG_PPC_PURR: - *val = get_reg_val(id, vcpu->arch.purr); + *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu)); break; case KVM_REG_PPC_SPURR: - *val = get_reg_val(id, vcpu->arch.spurr); + *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu)); break; case KVM_REG_PPC_AMR: - *val = get_reg_val(id, vcpu->arch.amr); + *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu)); break; case KVM_REG_PPC_UAMOR: - *val = get_reg_val(id, vcpu->arch.uamor); + *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu)); break; - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: + case KVM_REG_PPC_MMCR0 ... 
KVM_REG_PPC_MMCR1: i = id - KVM_REG_PPC_MMCR0; - *val = get_reg_val(id, vcpu->arch.mmcr[i]); + *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i)); + break; + case KVM_REG_PPC_MMCR2: + *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2)); + break; + case KVM_REG_PPC_MMCRA: + *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu)); + break; + case KVM_REG_PPC_MMCRS: + *val = get_reg_val(id, vcpu->arch.mmcrs); + break; + case KVM_REG_PPC_MMCR3: + *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3)); break; case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; - *val = get_reg_val(id, vcpu->arch.pmc[i]); + *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i)); break; case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: i = id - KVM_REG_PPC_SPMC1; *val = get_reg_val(id, vcpu->arch.spmc[i]); break; case KVM_REG_PPC_SIAR: - *val = get_reg_val(id, vcpu->arch.siar); + *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu)); break; case KVM_REG_PPC_SDAR: - *val = get_reg_val(id, vcpu->arch.sdar); + *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu)); break; case KVM_REG_PPC_SIER: - *val = get_reg_val(id, vcpu->arch.sier); + *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0)); + break; + case KVM_REG_PPC_SIER2: + *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1)); + break; + case KVM_REG_PPC_SIER3: + *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2)); break; case KVM_REG_PPC_IAMR: - *val = get_reg_val(id, vcpu->arch.iamr); + *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu)); break; case KVM_REG_PPC_PSPB: - *val = get_reg_val(id, vcpu->arch.pspb); + *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu)); break; case KVM_REG_PPC_DPDES: - *val = get_reg_val(id, vcpu->arch.vcore->dpdes); + /* + * On POWER9, where we are emulating msgsndp etc., + * we return 1 bit for each vcpu, which can come from + * either vcore->dpdes or doorbell_request. + * On POWER8, doorbell_request is 0. 
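/*
 * Model of the DPDES read-out described above: on POWER9 each vcpu reports
 * its own doorbell_request bit, on POWER8 the shared vcore dpdes value is
 * returned. Types abbreviated for the sketch:
 */
#include <stdbool.h>
#include <stdint.h>

struct vcpu_dbell {
	bool doorbell_request;	/* per-vcpu, POWER9 emulation */
	uint64_t vcore_dpdes;	/* shared per-core register, POWER8 */
};

uint64_t get_dpdes(const struct vcpu_dbell *v, bool is_power9)
{
	if (is_power9)
		return v->doorbell_request ? 1 : 0;
	return v->vcore_dpdes;
}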
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + *val = get_reg_val(id, vcpu->arch.doorbell_request); + else + *val = get_reg_val(id, vcpu->arch.vcore->dpdes); break; case KVM_REG_PPC_VTB: - *val = get_reg_val(id, vcpu->arch.vcore->vtb); + *val = get_reg_val(id, kvmppc_get_vtb(vcpu)); break; case KVM_REG_PPC_DAWR: - *val = get_reg_val(id, vcpu->arch.dawr); + *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu)); break; case KVM_REG_PPC_DAWRX: - *val = get_reg_val(id, vcpu->arch.dawrx); + *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu)); + break; + case KVM_REG_PPC_DAWR1: + *val = get_reg_val(id, kvmppc_get_dawr1_hv(vcpu)); + break; + case KVM_REG_PPC_DAWRX1: + *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu)); + break; + case KVM_REG_PPC_DEXCR: + *val = get_reg_val(id, kvmppc_get_dexcr_hv(vcpu)); + break; + case KVM_REG_PPC_HASHKEYR: + *val = get_reg_val(id, kvmppc_get_hashkeyr_hv(vcpu)); + break; + case KVM_REG_PPC_HASHPKEYR: + *val = get_reg_val(id, kvmppc_get_hashpkeyr_hv(vcpu)); break; case KVM_REG_PPC_CIABR: - *val = get_reg_val(id, vcpu->arch.ciabr); + *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu)); break; case KVM_REG_PPC_CSIGR: *val = get_reg_val(id, vcpu->arch.csigr); @@ -1374,13 +2355,13 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.tcscr); break; case KVM_REG_PPC_PID: - *val = get_reg_val(id, vcpu->arch.pid); + *val = get_reg_val(id, kvmppc_get_pid(vcpu)); break; case KVM_REG_PPC_ACOP: *val = get_reg_val(id, vcpu->arch.acop); break; case KVM_REG_PPC_WORT: - *val = get_reg_val(id, vcpu->arch.wort); + *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu)); break; case KVM_REG_PPC_TIDR: *val = get_reg_val(id, vcpu->arch.tid); @@ -1406,14 +2387,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, spin_unlock(&vcpu->arch.vpa_update_lock); break; case KVM_REG_PPC_TB_OFFSET: - *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); + *val = get_reg_val(id, kvmppc_get_tb_offset(vcpu)); break; case KVM_REG_PPC_LPCR: case KVM_REG_PPC_LPCR_64: - *val = get_reg_val(id, vcpu->arch.vcore->lpcr); + *val = get_reg_val(id, kvmppc_get_lpcr(vcpu)); break; case KVM_REG_PPC_PPR: - *val = get_reg_val(id, vcpu->arch.ppr); + *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu)); break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_REG_PPC_TFHAR: @@ -1482,7 +2463,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, break; #endif case KVM_REG_PPC_ARCH_COMPAT: - *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); + *val = get_reg_val(id, kvmppc_get_arch_compat(vcpu)); + break; + case KVM_REG_PPC_DEC_EXPIRY: + *val = get_reg_val(id, kvmppc_get_dec_expires(vcpu)); + break; + case KVM_REG_PPC_ONLINE: + *val = get_reg_val(id, vcpu->arch.online); + break; + case KVM_REG_PPC_PTCR: + *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); + break; + case KVM_REG_PPC_FSCR: + *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu)); break; default: r = -EINVAL; @@ -1512,64 +2505,100 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; break; case KVM_REG_PPC_DSCR: - vcpu->arch.dscr = set_reg_val(id, *val); + kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_PURR: - vcpu->arch.purr = set_reg_val(id, *val); + kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_SPURR: - vcpu->arch.spurr = set_reg_val(id, *val); + kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_AMR: - vcpu->arch.amr = set_reg_val(id, *val); + 
kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_UAMOR: - vcpu->arch.uamor = set_reg_val(id, *val); + kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val)); break; - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: i = id - KVM_REG_PPC_MMCR0; - vcpu->arch.mmcr[i] = set_reg_val(id, *val); + kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_MMCR2: + kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_MMCRA: + kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_MMCRS: + vcpu->arch.mmcrs = set_reg_val(id, *val); + break; + case KVM_REG_PPC_MMCR3: + kvmppc_set_mmcr_hv(vcpu, 3, set_reg_val(id, *val)); break; case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; - vcpu->arch.pmc[i] = set_reg_val(id, *val); + kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val)); break; case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: i = id - KVM_REG_PPC_SPMC1; vcpu->arch.spmc[i] = set_reg_val(id, *val); break; case KVM_REG_PPC_SIAR: - vcpu->arch.siar = set_reg_val(id, *val); + kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_SDAR: - vcpu->arch.sdar = set_reg_val(id, *val); + kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_SIER: - vcpu->arch.sier = set_reg_val(id, *val); + kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_SIER2: + kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_SIER3: + kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val)); break; case KVM_REG_PPC_IAMR: - vcpu->arch.iamr = set_reg_val(id, *val); + kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_PSPB: - vcpu->arch.pspb = set_reg_val(id, *val); + kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_DPDES: - vcpu->arch.vcore->dpdes = set_reg_val(id, *val); + if (cpu_has_feature(CPU_FTR_ARCH_300)) + vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; + else + vcpu->arch.vcore->dpdes = set_reg_val(id, *val); break; case KVM_REG_PPC_VTB: - vcpu->arch.vcore->vtb = set_reg_val(id, *val); + kvmppc_set_vtb(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_DAWR: - vcpu->arch.dawr = set_reg_val(id, *val); + kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_DAWRX: - vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; + kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP); + break; + case KVM_REG_PPC_DAWR1: + kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_DAWRX1: + kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP); + break; + case KVM_REG_PPC_DEXCR: + kvmppc_set_dexcr_hv(vcpu, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_HASHKEYR: + kvmppc_set_hashkeyr_hv(vcpu, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_HASHPKEYR: + kvmppc_set_hashpkeyr_hv(vcpu, set_reg_val(id, *val)); break; case KVM_REG_PPC_CIABR: - vcpu->arch.ciabr = set_reg_val(id, *val); + kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val)); /* Don't allow setting breakpoints in hypervisor code */ - if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) - vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ + if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER) + kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV); break; case KVM_REG_PPC_CSIGR: vcpu->arch.csigr = set_reg_val(id, *val); @@ -1581,13 +2610,13 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 
vcpu->arch.tcscr = set_reg_val(id, *val);
 break;
 case KVM_REG_PPC_PID:
- vcpu->arch.pid = set_reg_val(id, *val);
+ kvmppc_set_pid(vcpu, set_reg_val(id, *val));
 break;
 case KVM_REG_PPC_ACOP:
 vcpu->arch.acop = set_reg_val(id, *val);
 break;
 case KVM_REG_PPC_WORT:
- vcpu->arch.wort = set_reg_val(id, *val);
+ kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val));
 break;
 case KVM_REG_PPC_TIDR:
 vcpu->arch.tid = set_reg_val(id, *val);
 break;
@@ -1622,18 +2651,25 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 break;
 case KVM_REG_PPC_TB_OFFSET:
+ {
+ /* round up to multiple of 2^24 */
+ u64 tb_offset = ALIGN(set_reg_val(id, *val), 1UL << 24);
+
 /*
- * POWER9 DD1 has an erratum where writing TBU40 causes
- * the timebase to lose ticks. So we don't let the
- * timebase offset be changed on P9 DD1. (It is
- * initialized to zero.)
+ * Now that we know the timebase offset, update the
+ * decrementer expiry with a guest timebase value. If
+ * userspace does not set DEC_EXPIRY, this ensures
+ * a migrated vcpu at least starts with an expired
+ * decrementer, which is better than a large one that
+ * causes a hang.
 */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- break;
- /* round up to multiple of 2^24 */
- vcpu->arch.vcore->tb_offset =
- ALIGN(set_reg_val(id, *val), 1UL << 24);
+ kvmppc_set_tb_offset(vcpu, tb_offset);
+ if (!kvmppc_get_dec_expires(vcpu) && tb_offset)
+ kvmppc_set_dec_expires(vcpu, get_tb() + tb_offset);
+
 break;
+ }
 case KVM_REG_PPC_LPCR:
 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
 break;
@@ -1641,7 +2677,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
 break;
 case KVM_REG_PPC_PPR:
- vcpu->arch.ppr = set_reg_val(id, *val);
+ kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val));
 break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 case KVM_REG_PPC_TFHAR:
@@ -1711,6 +2747,23 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 case KVM_REG_PPC_ARCH_COMPAT:
 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
 break;
+ case KVM_REG_PPC_DEC_EXPIRY:
+ kvmppc_set_dec_expires(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_ONLINE:
+ i = set_reg_val(id, *val);
+ if (i && !vcpu->arch.online)
+ atomic_inc(&vcpu->arch.vcore->online_count);
+ else if (!i && vcpu->arch.online)
+ atomic_dec(&vcpu->arch.vcore->online_count);
+ vcpu->arch.online = i;
+ break;
+ case KVM_REG_PPC_PTCR:
+ vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_FSCR:
+ kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val));
+ break;
 default:
 r = -EINVAL;
 break;
@@ -1726,14 +2779,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 * MMU mode (radix or HPT), unfortunately, but since we only support
 * HPT guests on a HPT host so far, that isn't an impediment yet. 
*/ -static int threads_per_vcore(void) +static int threads_per_vcore(struct kvm *kvm) { if (cpu_has_feature(CPU_FTR_ARCH_300)) return 1; return threads_per_subcore; } -static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core) +static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id) { struct kvmppc_vcore *vcore; @@ -1744,10 +2797,10 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core) spin_lock_init(&vcore->lock); spin_lock_init(&vcore->stoltb_lock); - init_swait_queue_head(&vcore->wq); + rcuwait_init(&vcore->wait); vcore->preempt_tb = TB_NIL; vcore->lpcr = kvm->arch.lpcr; - vcore->first_vcpuid = core * kvm->arch.smt_mode; + vcore->first_vcpuid = id; vcore->kvm = kvm; INIT_LIST_HEAD(&vcore->preempt_list); @@ -1759,14 +2812,24 @@ static struct debugfs_timings_element { const char *name; size_t offset; } timings[] = { +#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING + {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)}, + {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)}, + {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)}, + {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)}, + {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)}, + {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)}, + {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)}, +#else {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)}, {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)}, {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)}, {"guest", offsetof(struct kvm_vcpu, arch.guest_time)}, {"cede", offsetof(struct kvm_vcpu, arch.cede_time)}, +#endif }; -#define N_TIMINGS (sizeof(timings) / sizeof(timings[0])) +#define N_TIMINGS (ARRAY_SIZE(timings)) struct debugfs_timings_state { struct kvm_vcpu *vcpu; @@ -1880,44 +2943,31 @@ static const struct file_operations debugfs_timings_ops = { }; /* Create a debugfs directory for the vcpu */ -static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) +static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) { - char buf[16]; - struct kvm *kvm = vcpu->kvm; - - snprintf(buf, sizeof(buf), "vcpu%u", id); - if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) - return; - vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); - if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) - return; - vcpu->arch.debugfs_timings = - debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, - vcpu, &debugfs_timings_ops); + if (cpu_has_feature(CPU_FTR_ARCH_300) == IS_ENABLED(CONFIG_KVM_BOOK3S_HV_P9_TIMING)) + debugfs_create_file("timings", 0444, debugfs_dentry, vcpu, + &debugfs_timings_ops); + return 0; } #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ -static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) +static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) { + return 0; } #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ -static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, - unsigned int id) +static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu; int err; int core; struct kvmppc_vcore *vcore; + struct kvm *kvm; + unsigned int id; - err = -ENOMEM; - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - if (!vcpu) - goto out; - - err = kvm_vcpu_init(vcpu, kvm, id); - if (err) - goto free_vcpu; + kvm = vcpu->kvm; + id = vcpu->vcpu_id; vcpu->arch.shared = &vcpu->arch.shregs; #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE @@ -1931,28 +2981,59 @@ static struct 
kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 vcpu->arch.shared_big_endian = false;
#endif
#endif
- vcpu->arch.mmcr[0] = MMCR0_FC;
- vcpu->arch.ctrl = CTRL_RUNLATCH;
+
+ if (kvmhv_is_nestedv2()) {
+ err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io);
+ if (err < 0)
+ return err;
+ }
+
+ kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
+ kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
+ }
+
+ kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH);
 /* default to host PVR, since we can't spoof it */
 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
 spin_lock_init(&vcpu->arch.vpa_update_lock);
 spin_lock_init(&vcpu->arch.tbacct_lock);
 vcpu->arch.busy_preempt = TB_NIL;
+ __kvmppc_set_msr_hv(vcpu, MSR_ME);
 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
 /*
 * Set the default HFSCR for the guest from the host value.
- * This value is only used on POWER9.
- * On POWER9 DD1, TM doesn't work, so we make sure to
- * prevent the guest from using it.
- * On POWER9, we want to virtualize the doorbell facility, so we
- * turn off the HFSCR bit, which causes those instructions to trap.
+ * This value is only used on POWER9 and later.
+ * On >= POWER9, we want to virtualize the doorbell facility, so we
+ * don't set the HFSCR_MSGP bit, and that causes those instructions
+ * to trap and then we emulate them.
 */
- vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
- if (!cpu_has_feature(CPU_FTR_TM))
- vcpu->arch.hfscr &= ~HFSCR_TM;
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- vcpu->arch.hfscr &= ~HFSCR_MSGP;
+ kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP);
+
+ /* On POWER10 and later, allow prefixed instructions */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX);
+
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
+#endif
+ }
+ if (cpu_has_feature(CPU_FTR_TM_COMP))
+ vcpu->arch.hfscr |= HFSCR_TM;
+
+ vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);
+
+ /*
+ * PM, EBB, TM are demand-faulted so start with them clear.
+ */
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM));
 
 kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -1963,20 +3044,40 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 mutex_lock(&kvm->lock);
 vcore = NULL;
 err = -EINVAL;
- core = id / kvm->arch.smt_mode;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
+ pr_devel("KVM: VCPU ID too high\n");
+ core = KVM_MAX_VCORES;
+ } else {
+ BUG_ON(kvm->arch.smt_mode != 1);
+ core = kvmppc_pack_vcpu_id(kvm, id);
+ }
+ } else {
+ core = id / kvm->arch.smt_mode;
+ }
 if (core < KVM_MAX_VCORES) {
 vcore = kvm->arch.vcores[core];
- if (!vcore) {
+ if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ pr_devel("KVM: collision on id %u", id);
+ vcore = NULL;
+ } else if (!vcore) {
+ /*
+ * Take mmu_setup_lock for mutual exclusion
+ * with kvmppc_update_lpcr(). 
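+ * Without it, a vcore created here could be initialised from a
+ * stale kvm->arch.lpcr and then be missed by a concurrent
+ * kvmppc_update_lpcr() walk of the vcore array.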
+ */ err = -ENOMEM; - vcore = kvmppc_vcore_create(kvm, core); + vcore = kvmppc_vcore_create(kvm, + id & ~(kvm->arch.smt_mode - 1)); + mutex_lock(&kvm->arch.mmu_setup_lock); kvm->arch.vcores[core] = vcore; kvm->arch.online_vcores++; + mutex_unlock(&kvm->arch.mmu_setup_lock); } } mutex_unlock(&kvm->lock); if (!vcore) - goto free_vcpu; + return err; spin_lock(&vcore->lock); ++vcore->num_threads; @@ -1989,14 +3090,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_sanity_check(vcpu); - debugfs_vcpu_init(vcpu, id); - - return vcpu; - -free_vcpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); -out: - return ERR_PTR(err); + return 0; } static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode, @@ -2050,8 +3144,8 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); spin_unlock(&vcpu->arch.vpa_update_lock); - kvm_vcpu_uninit(vcpu); - kmem_cache_free(kvm_vcpu_cache, vcpu); + if (kvmhv_is_nestedv2()) + kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io); } static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) @@ -2065,38 +3159,28 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu) unsigned long dec_nsec, now; now = get_tb(); - if (now > vcpu->arch.dec_expires) { + if (now > kvmppc_dec_expires_host_tb(vcpu)) { /* decrementer has already gone negative */ kvmppc_core_queue_dec(vcpu); kvmppc_core_prepare_to_enter(vcpu); return; } - dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC - / tb_ticks_per_sec; + dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now); hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); vcpu->arch.timer_running = 1; } -static void kvmppc_end_cede(struct kvm_vcpu *vcpu) -{ - vcpu->arch.ceded = 0; - if (vcpu->arch.timer_running) { - hrtimer_try_to_cancel(&vcpu->arch.dec_timer); - vcpu->arch.timer_running = 0; - } -} - extern int __kvmppc_vcore_entry(void); static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, - struct kvm_vcpu *vcpu) + struct kvm_vcpu *vcpu, u64 tb) { u64 now; if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) return; spin_lock_irq(&vcpu->arch.tbacct_lock); - now = mftb(); + now = tb; vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - vcpu->arch.stolen_logged; vcpu->arch.busy_preempt = now; @@ -2111,7 +3195,7 @@ static int kvmppc_grab_hwthread(int cpu) struct paca_struct *tpaca; long timeout = 10000; - tpaca = &paca[cpu]; + tpaca = paca_ptrs[cpu]; /* Ensure the thread won't go into the kernel if it wakes */ tpaca->kvm_hstate.kvm_vcpu = NULL; @@ -2144,33 +3228,81 @@ static void kvmppc_release_hwthread(int cpu) { struct paca_struct *tpaca; - tpaca = &paca[cpu]; + tpaca = paca_ptrs[cpu]; tpaca->kvm_hstate.hwthread_req = 0; tpaca->kvm_hstate.kvm_vcpu = NULL; tpaca->kvm_hstate.kvm_vcore = NULL; tpaca->kvm_hstate.kvm_split_mode = NULL; } +static DEFINE_PER_CPU(struct kvm *, cpu_in_guest); + static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) { + struct kvm_nested_guest *nested = vcpu->arch.nested; + cpumask_t *need_tlb_flush; int i; - cpu = cpu_first_thread_sibling(cpu); - cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); + if (nested) + need_tlb_flush = &nested->need_tlb_flush; + else + need_tlb_flush = &kvm->arch.need_tlb_flush; + + cpu = cpu_first_tlb_thread_sibling(cpu); + for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu); + i += cpu_tlb_thread_sibling_step()) + cpumask_set_cpu(i, need_tlb_flush); + /* - * Make sure setting of bit 
in need_tlb_flush precedes
- * testing of cpu_in_guest bits. The matching barrier on
- * the other side is the first smp_mb() in kvmppc_run_core().
+ * Make sure setting of bit in need_tlb_flush precedes testing of
+ * cpu_in_guest. The matching barrier on the other side is hwsync
+ * when switching to guest MMU mode, which happens between
+ * cpu_in_guest being set to the guest kvm, and need_tlb_flush bit
+ * being tested.
 */
 smp_mb();
- for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest))
- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step()) {
+ struct kvm *running = *per_cpu_ptr(&cpu_in_guest, i);
+
+ if (running == kvm)
+ smp_call_function_single(i, do_nothing, NULL, 1);
+ }
+}
+
+static void do_migrate_away_vcpu(void *arg)
+{
+ struct kvm_vcpu *vcpu = arg;
+ struct kvm *kvm = vcpu->kvm;
+
+ /*
+ * If the guest has GTSE, it may execute tlbie, so do an eieio; tlbsync;
+ * ptesync sequence on the old CPU before migrating to a new one, in
+ * case we interrupted the guest between a tlbie ; eieio ;
+ * tlbsync; ptesync sequence.
+ *
+ * Otherwise, ptesync is sufficient for ordering tlbiel sequences.
+ */
+ if (kvm->arch.lpcr & LPCR_GTSE)
+ asm volatile("eieio; tlbsync; ptesync");
+ else
+ asm volatile("ptesync");
}
 
 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
 {
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
 struct kvm *kvm = vcpu->kvm;
+ int prev_cpu;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
+
+ if (nested)
+ prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
+ else
+ prev_cpu = vcpu->arch.prev_cpu;
 
 /*
 * With radix, the guest can do TLB invalidations itself,
@@ -2181,15 +3313,21 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
 * can move around between pcpus. To cope with this, when
 * a vcpu moves from one pcpu to another, we need to tell
 * any vcpus running on the same core as this vcpu previously
- * ran to flush the TLB. The TLB is shared between threads,
- * so we use a single bit in .need_tlb_flush for all 4 threads.
+ * ran to flush the TLB.
 */
- if (vcpu->arch.prev_cpu != pcpu) {
- if (vcpu->arch.prev_cpu >= 0 &&
- cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
- cpu_first_thread_sibling(pcpu))
- radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
- vcpu->arch.prev_cpu = pcpu;
+ if (prev_cpu != pcpu) {
+ if (prev_cpu >= 0) {
+ if (cpu_first_tlb_thread_sibling(prev_cpu) !=
+ cpu_first_tlb_thread_sibling(pcpu))
+ radix_flush_cpu(kvm, prev_cpu, vcpu);
+
+ smp_call_function_single(prev_cpu,
+ do_migrate_away_vcpu, vcpu, 1);
+ }
+ if (nested)
+ nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
+ else
+ vcpu->arch.prev_cpu = pcpu;
 }
 }
 
@@ -2197,7 +3335,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 {
 int cpu;
 struct paca_struct *tpaca;
- struct kvm *kvm = vc->kvm;
 
 cpu = vc->pcpu;
 if (vcpu) {
@@ -2208,11 +3345,11 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 cpu += vcpu->arch.ptid;
 vcpu->cpu = vc->pcpu;
 vcpu->arch.thread_cpu = cpu;
- cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
 }
- tpaca = &paca[cpu];
+ tpaca = paca_ptrs[cpu];
 tpaca->kvm_hstate.kvm_vcpu = vcpu;
 tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
+ tpaca->kvm_hstate.fake_suspend = 0;
 /* Order stores to hstate.kvm_vcpu etc. 
before store to kvm_vcore */ smp_wmb(); tpaca->kvm_hstate.kvm_vcore = vc; @@ -2220,11 +3357,10 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) kvmppc_ipi_thread(cpu); } -static void kvmppc_wait_for_nap(void) +static void kvmppc_wait_for_nap(int n_threads) { int cpu = smp_processor_id(); int i, loops; - int n_threads = threads_per_vcore(); if (n_threads <= 1) return; @@ -2236,7 +3372,7 @@ static void kvmppc_wait_for_nap(void) * for any threads that still have a non-NULL vcore ptr. */ for (i = 1; i < n_threads; ++i) - if (paca[cpu + i].kvm_hstate.kvm_vcore) + if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) break; if (i == n_threads) { HMT_medium(); @@ -2246,7 +3382,7 @@ static void kvmppc_wait_for_nap(void) } HMT_medium(); for (i = 1; i < n_threads; ++i) - if (paca[cpu + i].kvm_hstate.kvm_vcore) + if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); } @@ -2309,23 +3445,27 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) { struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); + WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); + vc->vcore_state = VCORE_PREEMPT; vc->pcpu = smp_processor_id(); - if (vc->num_threads < threads_per_vcore()) { + if (vc->num_threads < threads_per_vcore(vc->kvm)) { spin_lock(&lp->lock); list_add_tail(&vc->preempt_list, &lp->list); spin_unlock(&lp->lock); } /* Start accumulating stolen time */ - kvmppc_core_start_stolen(vc); + kvmppc_core_start_stolen(vc, mftb()); } static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) { struct preempted_vcore_list *lp; - kvmppc_core_end_stolen(vc); + WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); + + kvmppc_core_end_stolen(vc, mftb()); if (!list_empty(&vc->preempt_list)) { lp = &per_cpu(preempted_vcores, vc->pcpu); spin_lock(&lp->lock); @@ -2349,7 +3489,7 @@ struct core_info { /* * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7 - * respectively in 2-way micro-threading (split-core) mode. + * respectively in 2-way micro-threading (split-core) mode on POWER8. */ static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 }; @@ -2365,7 +3505,14 @@ static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) static bool subcore_config_ok(int n_subcores, int n_threads) { - /* Can only dynamically split if unsplit to begin with */ + /* + * POWER9 "SMT4" cores are permanently in what is effectively a 4-way + * split-core mode, with one thread per subcore. 
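+ * Hence allow at most 4 subcores, each of a single thread.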
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + return n_subcores <= 4 && n_threads == 1; + + /* On POWER8, can only dynamically split if unsplit to begin with */ if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS) return false; if (n_subcores > MAX_SUBCORES) @@ -2386,6 +3533,7 @@ static void init_vcore_to_run(struct kvmppc_vcore *vc) vc->in_guest = 0; vc->napping_threads = 0; vc->conferring_threads = 0; + vc->tb_offset_applied = 0; } static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) @@ -2396,6 +3544,10 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return false; + /* In one_vm_per_core mode, require all vcores to be from the same vm */ + if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) + return false; + if (n_threads < cip->max_subcore_threads) n_threads = cip->max_subcore_threads; if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) @@ -2440,7 +3592,7 @@ static void prepare_threads(struct kvmppc_vcore *vc) vcpu->arch.ret = RESUME_GUEST; else continue; - kvmppc_remove_runnable(vc, vcpu); + kvmppc_remove_runnable(vc, vcpu, mftb()); wake_up(&vcpu->arch.cpu_run); } } @@ -2455,11 +3607,11 @@ static void collect_piggybacks(struct core_info *cip, int target_threads) if (!spin_trylock(&pvc->lock)) continue; prepare_threads(pvc); - if (!pvc->n_runnable) { + if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { list_del_init(&pvc->preempt_list); if (pvc->runner == NULL) { pvc->vcore_state = VCORE_INACTIVE; - kvmppc_core_end_stolen(pvc); + kvmppc_core_end_stolen(pvc, mftb()); } spin_unlock(&pvc->lock); continue; @@ -2468,7 +3620,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads) spin_unlock(&pvc->lock); continue; } - kvmppc_core_end_stolen(pvc); + kvmppc_core_end_stolen(pvc, mftb()); pvc->vcore_state = VCORE_PIGGYBACK; if (cip->total_threads >= target_threads) break; @@ -2476,15 +3628,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads) spin_unlock(&lp->lock); } -static bool recheck_signals(struct core_info *cip) +static bool recheck_signals_and_mmu(struct core_info *cip) { int sub, i; struct kvm_vcpu *vcpu; + struct kvmppc_vcore *vc; - for (sub = 0; sub < cip->n_subcores; ++sub) - for_each_runnable_thread(i, vcpu, cip->vc[sub]) + for (sub = 0; sub < cip->n_subcores; ++sub) { + vc = cip->vc[sub]; + if (!vc->kvm->arch.mmu_ready) + return true; + for_each_runnable_thread(i, vcpu, vc) if (signal_pending(vcpu->arch.run_task)) return true; + } return false; } @@ -2498,8 +3655,16 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) spin_lock(&vc->lock); now = get_tb(); for_each_runnable_thread(i, vcpu, vc) { + /* + * It's safe to unlock the vcore in the loop here, because + * for_each_runnable_thread() is safe against removal of + * the vcpu, and the vcore state is VCORE_EXITING here, + * so any vcpus becoming runnable will have their arch.trap + * set to zero and can't actually run in the guest. 
+ */ + spin_unlock(&vc->lock); /* cancel pending dec exception if dec is positive */ - if (now < vcpu->arch.dec_expires && + if (now < kvmppc_dec_expires_host_tb(vcpu) && kvmppc_core_pending_dec(vcpu)) kvmppc_core_dequeue_dec(vcpu); @@ -2507,12 +3672,13 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) ret = RESUME_GUEST; if (vcpu->arch.trap) - ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, + ret = kvmppc_handle_exit_hv(vcpu, vcpu->arch.run_task); vcpu->arch.ret = ret; vcpu->arch.trap = 0; + spin_lock(&vc->lock); if (is_kvmppc_resume_guest(vcpu->arch.ret)) { if (vcpu->arch.pending_exceptions) kvmppc_core_prepare_to_enter(vcpu); @@ -2521,7 +3687,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) else ++still_running; } else { - kvmppc_remove_runnable(vc, vcpu); + kvmppc_remove_runnable(vc, vcpu, mftb()); wake_up(&vcpu->arch.cpu_run); } } @@ -2530,7 +3696,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) kvmppc_vcore_preempt(vc); } else if (vc->runner) { vc->vcore_state = VCORE_PREEMPT; - kvmppc_core_start_stolen(vc); + kvmppc_core_start_stolen(vc, mftb()); } else { vc->vcore_state = VCORE_INACTIVE; } @@ -2598,6 +3764,9 @@ static void set_irq_happened(int trap) case BOOK3S_INTERRUPT_HMI: local_paca->irq_happened |= PACA_IRQ_HMI; break; + case BOOK3S_INTERRUPT_SYSTEM_RESET: + replay_system_reset(); + break; } } @@ -2621,6 +3790,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) int target_threads; int controlled_threads; int trap; + bool is_power8; + + if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300))) + return; /* * Remove from the list any threads that have a signal pending @@ -2643,7 +3816,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * the number of threads per subcore, except on POWER9, * where it's 1 because the threads are (mostly) independent. */ - controlled_threads = threads_per_vcore(); + controlled_threads = threads_per_vcore(vc->kvm); /* * Make sure we are running on primary threads, and that secondary @@ -2654,7 +3827,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { for_each_runnable_thread(i, vcpu, vc) { vcpu->arch.ret = -EBUSY; - kvmppc_remove_runnable(vc, vcpu); + kvmppc_remove_runnable(vc, vcpu, mftb()); wake_up(&vcpu->arch.cpu_run); } goto out; @@ -2673,26 +3846,16 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) collect_piggybacks(&core_info, target_threads); /* - * On radix, arrange for TLB flushing if necessary. - * This has to be done before disabling interrupts since - * it uses smp_call_function(). - */ - pcpu = smp_processor_id(); - if (kvm_is_radix(vc->kvm)) { - for (sub = 0; sub < core_info.n_subcores; ++sub) - for_each_runnable_thread(i, vcpu, core_info.vc[sub]) - kvmppc_prepare_radix_vcpu(vcpu, pcpu); - } - - /* * Hard-disable interrupts, and check resched flag and signals. * If we need to reschedule or deliver a signal, clean up * and return without going into the guest(s). + * If the mmu_ready flag has been cleared, don't go into the + * guest because that means a HPT resize operation is in progress. 
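+ * (mmu_ready is re-checked with interrupts hard-disabled by
+ * recheck_signals_and_mmu() below.)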
*/ local_irq_disable(); hard_irq_disable(); if (lazy_irq_pending() || need_resched() || - recheck_signals(&core_info)) { + recheck_signals_and_mmu(&core_info)) { local_irq_enable(); vc->vcore_state = VCORE_INACTIVE; /* Unlock all except the primary vcore */ @@ -2714,32 +3877,44 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) cmd_bit = stat_bit = 0; split = core_info.n_subcores; sip = NULL; + is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S); + if (split > 1) { - /* threads_per_subcore must be MAX_SMT_THREADS (8) here */ - if (split == 2 && (dynamic_mt_modes & 2)) { - cmd_bit = HID0_POWER8_1TO2LPAR; - stat_bit = HID0_POWER8_2LPARMODE; - } else { - split = 4; - cmd_bit = HID0_POWER8_1TO4LPAR; - stat_bit = HID0_POWER8_4LPARMODE; - } - subcore_size = MAX_SMT_THREADS / split; sip = &split_info; memset(&split_info, 0, sizeof(split_info)); - split_info.rpr = mfspr(SPRN_RPR); - split_info.pmmar = mfspr(SPRN_PMMAR); - split_info.ldbar = mfspr(SPRN_LDBAR); - split_info.subcore_size = subcore_size; for (sub = 0; sub < core_info.n_subcores; ++sub) split_info.vc[sub] = core_info.vc[sub]; + + if (is_power8) { + if (split == 2 && (dynamic_mt_modes & 2)) { + cmd_bit = HID0_POWER8_1TO2LPAR; + stat_bit = HID0_POWER8_2LPARMODE; + } else { + split = 4; + cmd_bit = HID0_POWER8_1TO4LPAR; + stat_bit = HID0_POWER8_4LPARMODE; + } + subcore_size = MAX_SMT_THREADS / split; + split_info.rpr = mfspr(SPRN_RPR); + split_info.pmmar = mfspr(SPRN_PMMAR); + split_info.ldbar = mfspr(SPRN_LDBAR); + split_info.subcore_size = subcore_size; + } else { + split_info.subcore_size = 1; + } + /* order writes to split_info before kvm_split_mode pointer */ smp_wmb(); } - for (thr = 0; thr < controlled_threads; ++thr) - paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip; - /* Initiate micro-threading (split-core) if required */ + for (thr = 0; thr < controlled_threads; ++thr) { + struct paca_struct *paca = paca_ptrs[pcpu + thr]; + + paca->kvm_hstate.napping = 0; + paca->kvm_hstate.kvm_split_mode = sip; + } + + /* Initiate micro-threading (split-core) on POWER8 if required */ if (cmd_bit) { unsigned long hid0 = mfspr(SPRN_HID0); @@ -2755,17 +3930,44 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) } } + /* + * On POWER8, set RWMR register. + * Since it only affects PURR and SPURR, it doesn't affect + * the host, so we don't save/restore the host value. + */ + if (is_power8) { + unsigned long rwmr_val = RWMR_RPA_P8_8THREAD; + int n_online = atomic_read(&vc->online_count); + + /* + * Use the 8-thread value if we're doing split-core + * or if the vcore's online count looks bogus. + */ + if (split == 1 && threads_per_subcore == MAX_SMT_THREADS && + n_online >= 1 && n_online <= MAX_SMT_THREADS) + rwmr_val = p8_rwmr_values[n_online]; + mtspr(SPRN_RWMR, rwmr_val); + } + /* Start all the threads */ active = 0; for (sub = 0; sub < core_info.n_subcores; ++sub) { - thr = subcore_thread_map[sub]; + thr = is_power8 ? subcore_thread_map[sub] : sub; thr0_done = false; active |= 1 << thr; pvc = core_info.vc[sub]; pvc->pcpu = pcpu + thr; for_each_runnable_thread(i, vcpu, pvc) { + /* + * XXX: is kvmppc_start_thread called too late here? + * It updates vcpu->cpu and vcpu->arch.thread_cpu + * which are used by kvmppc_fast_vcpu_kick_hv(), but + * kick is called after new exceptions become available + * and exceptions are checked earlier than here, by + * kvmppc_core_prepare_to_enter. 
+ */ kvmppc_start_thread(vcpu, pvc); - kvmppc_create_dtl_entry(vcpu, pvc); + kvmppc_update_vpa_dispatch(vcpu, pvc); trace_kvm_guest_enter(vcpu); if (!vcpu->arch.ptid) thr0_done = true; @@ -2777,7 +3979,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) */ if (!thr0_done) kvmppc_start_thread(NULL, pvc); - thr += pvc->num_threads; } /* @@ -2785,18 +3986,18 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * the vcore pointer in the PACA of the secondaries. */ smp_mb(); - if (cmd_bit) - split_info.do_nap = 1; /* ask secondaries to nap when done */ /* * When doing micro-threading, poke the inactive threads as well. * This gets them to the nap instruction after kvm_do_nap, * which reduces the time taken to unsplit later. */ - if (split > 1) + if (cmd_bit) { + split_info.do_nap = 1; /* ask secondaries to nap when done */ for (thr = 1; thr < threads_per_subcore; ++thr) if (!(active & (1 << thr))) kvmppc_ipi_thread(pcpu + thr); + } vc->vcore_state = VCORE_RUNNING; preempt_disable(); @@ -2806,23 +4007,20 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) for (sub = 0; sub < core_info.n_subcores; ++sub) spin_unlock(&core_info.vc[sub]->lock); - /* - * Interrupts will be enabled once we get into the guest, - * so tell lockdep that we're about to enable interrupts. - */ - trace_hardirqs_on(); - - guest_enter(); + guest_timing_enter_irqoff(); srcu_idx = srcu_read_lock(&vc->kvm->srcu); + guest_state_enter_irqoff(); + this_cpu_disable_ftrace(); + trap = __kvmppc_vcore_entry(); - srcu_read_unlock(&vc->kvm->srcu, srcu_idx); + this_cpu_enable_ftrace(); + guest_state_exit_irqoff(); - guest_exit(); + srcu_read_unlock(&vc->kvm->srcu, srcu_idx); - trace_hardirqs_off(); set_irq_happened(trap); spin_lock(&vc->lock); @@ -2830,12 +4028,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) vc->vcore_state = VCORE_EXITING; /* wait for secondary threads to finish writing their state to memory */ - kvmppc_wait_for_nap(); + kvmppc_wait_for_nap(controlled_threads); /* Return to whole-core mode if we split the core earlier */ - if (split > 1) { + if (cmd_bit) { unsigned long hid0 = mfspr(SPRN_HID0); - unsigned long loops = 0; hid0 &= ~HID0_POWER8_DYNLPARDIS; stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; @@ -2847,13 +4044,26 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) if (!(hid0 & stat_bit)) break; cpu_relax(); - ++loops; } split_info.do_nap = 0; } kvmppc_set_host_core(pcpu); + if (!vtime_accounting_enabled_this_cpu()) { + local_irq_enable(); + /* + * Service IRQs here before guest_timing_exit_irqoff() so any + * ticks that occurred while running the guest are accounted to + * the guest. If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. 
+ */ + local_irq_disable(); + } + guest_timing_exit_irqoff(); + local_irq_enable(); /* Let secondaries go back to the offline loop */ @@ -2861,7 +4071,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) kvmppc_release_hwthread(pcpu + i); if (sip && sip->napped[i]) kvmppc_ipi_thread(pcpu + i); - cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); } spin_unlock(&vc->lock); @@ -2869,19 +4078,442 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); + preempt_enable(); + for (sub = 0; sub < core_info.n_subcores; ++sub) { pvc = core_info.vc[sub]; post_guest_process(pvc, pvc == vc); } spin_lock(&vc->lock); - preempt_enable(); out: vc->vcore_state = VCORE_INACTIVE; trace_kvmppc_run_core(vc, 1); } +static inline bool hcall_is_xics(unsigned long req) +{ + return req == H_EOI || req == H_CPPR || req == H_IPI || + req == H_IPOLL || req == H_XIRR || req == H_XIRR_X; +} + +static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu) +{ + struct lppaca *lp = vcpu->arch.vpa.pinned_addr; + if (lp) { + u32 yield_count = be32_to_cpu(lp->yield_count) + 1; + lp->yield_count = cpu_to_be32(yield_count); + vcpu->arch.vpa.dirty = 1; + } +} + +/* Helper functions for reading L2's stats from L1's VPA */ +#ifdef CONFIG_PPC_PSERIES +static DEFINE_PER_CPU(u64, l1_to_l2_cs); +static DEFINE_PER_CPU(u64, l2_to_l1_cs); +static DEFINE_PER_CPU(u64, l2_runtime_agg); + +int kvmhv_get_l2_counters_status(void) +{ + return firmware_has_feature(FW_FEATURE_LPAR) && + get_lppaca()->l2_counters_enable; +} + +void kvmhv_set_l2_counters_status(int cpu, bool status) +{ + if (!firmware_has_feature(FW_FEATURE_LPAR)) + return; + if (status) + lppaca_of(cpu).l2_counters_enable = 1; + else + lppaca_of(cpu).l2_counters_enable = 0; +} +EXPORT_SYMBOL(kvmhv_set_l2_counters_status); + +int kvmhv_counters_tracepoint_regfunc(void) +{ + int cpu; + + for_each_present_cpu(cpu) { + kvmhv_set_l2_counters_status(cpu, true); + } + return 0; +} + +void kvmhv_counters_tracepoint_unregfunc(void) +{ + int cpu; + + for_each_present_cpu(cpu) { + kvmhv_set_l2_counters_status(cpu, false); + } +} + +static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu) +{ + struct lppaca *lp = get_lppaca(); + u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns; + u64 *l1_to_l2_cs_ptr = this_cpu_ptr(&l1_to_l2_cs); + u64 *l2_to_l1_cs_ptr = this_cpu_ptr(&l2_to_l1_cs); + u64 *l2_runtime_agg_ptr = this_cpu_ptr(&l2_runtime_agg); + + l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb)); + l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb)); + l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb)); + trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr, + l2_to_l1_ns - *l2_to_l1_cs_ptr, + l2_runtime_ns - *l2_runtime_agg_ptr); + *l1_to_l2_cs_ptr = l1_to_l2_ns; + *l2_to_l1_cs_ptr = l2_to_l1_ns; + *l2_runtime_agg_ptr = l2_runtime_ns; + vcpu->arch.l1_to_l2_cs = l1_to_l2_ns; + vcpu->arch.l2_to_l1_cs = l2_to_l1_ns; + vcpu->arch.l2_runtime_agg = l2_runtime_ns; +} + +u64 kvmhv_get_l1_to_l2_cs_time(void) +{ + return tb_to_ns(be64_to_cpu(get_lppaca()->l1_to_l2_cs_tb)); +} +EXPORT_SYMBOL(kvmhv_get_l1_to_l2_cs_time); + +u64 kvmhv_get_l2_to_l1_cs_time(void) +{ + return tb_to_ns(be64_to_cpu(get_lppaca()->l2_to_l1_cs_tb)); +} +EXPORT_SYMBOL(kvmhv_get_l2_to_l1_cs_time); + +u64 kvmhv_get_l2_runtime_agg(void) +{ + return tb_to_ns(be64_to_cpu(get_lppaca()->l2_runtime_tb)); +} +EXPORT_SYMBOL(kvmhv_get_l2_runtime_agg); + +u64 kvmhv_get_l1_to_l2_cs_time_vcpu(void) +{ + struct kvm_vcpu 
*vcpu; + struct kvm_vcpu_arch *arch; + + vcpu = local_paca->kvm_hstate.kvm_vcpu; + if (vcpu) { + arch = &vcpu->arch; + return arch->l1_to_l2_cs; + } else { + return 0; + } +} +EXPORT_SYMBOL(kvmhv_get_l1_to_l2_cs_time_vcpu); + +u64 kvmhv_get_l2_to_l1_cs_time_vcpu(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vcpu_arch *arch; + + vcpu = local_paca->kvm_hstate.kvm_vcpu; + if (vcpu) { + arch = &vcpu->arch; + return arch->l2_to_l1_cs; + } else { + return 0; + } +} +EXPORT_SYMBOL(kvmhv_get_l2_to_l1_cs_time_vcpu); + +u64 kvmhv_get_l2_runtime_agg_vcpu(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vcpu_arch *arch; + + vcpu = local_paca->kvm_hstate.kvm_vcpu; + if (vcpu) { + arch = &vcpu->arch; + return arch->l2_runtime_agg; + } else { + return 0; + } +} +EXPORT_SYMBOL(kvmhv_get_l2_runtime_agg_vcpu); + +#else +int kvmhv_get_l2_counters_status(void) +{ + return 0; +} + +static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu) +{ +} +#endif + +static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr, u64 *tb) +{ + struct kvmhv_nestedv2_io *io; + unsigned long msr, i; + int trap; + long rc; + + if (vcpu->arch.doorbell_request) { + vcpu->arch.doorbell_request = 0; + kvmppc_set_dpdes(vcpu, 1); + } + + io = &vcpu->arch.nestedv2_io; + + msr = mfmsr(); + kvmppc_msr_hard_disable_set_facilities(vcpu, msr); + if (lazy_irq_pending()) + return 0; + + rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit); + if (rc < 0) + return -EINVAL; + + kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr); + + accumulate_time(vcpu, &vcpu->arch.in_guest); + rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id, + &trap, &i); + + if (rc != H_SUCCESS) { + pr_err("KVM Guest Run VCPU hcall failed\n"); + if (rc == H_INVALID_ELEMENT_ID) + pr_err("KVM: Guest Run VCPU invalid element id at %ld\n", i); + else if (rc == H_INVALID_ELEMENT_SIZE) + pr_err("KVM: Guest Run VCPU invalid element size at %ld\n", i); + else if (rc == H_INVALID_ELEMENT_VALUE) + pr_err("KVM: Guest Run VCPU invalid element value at %ld\n", i); + return -EINVAL; + } + accumulate_time(vcpu, &vcpu->arch.guest_exit); + + *tb = mftb(); + kvmppc_gsm_reset(io->vcpu_message); + kvmppc_gsm_reset(io->vcore_message); + kvmppc_gsbm_zero(&io->valids); + + rc = kvmhv_nestedv2_parse_output(vcpu); + if (rc < 0) + return -EINVAL; + + timer_rearm_host_dec(*tb); + + /* Record context switch and guest_run_time data */ + if (kvmhv_get_l2_counters_status()) + do_trace_nested_cs_time(vcpu); + + return trap; +} + +/* call our hypervisor to load up HV regs and go */ +static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) +{ + unsigned long host_psscr; + unsigned long msr; + struct hv_guest_state hvregs; + struct p9_host_os_sprs host_os_sprs; + s64 dec; + int trap; + + msr = mfmsr(); + + save_p9_host_os_sprs(&host_os_sprs); + + /* + * We need to save and restore the guest visible part of the + * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor + * doesn't do this for us. Note only required if pseries since + * this is done in kvmhv_vcpu_entry_p9() below otherwise. 
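+ * (SPRN_PSSCR_PR reaches only the OS-visible PSSCR fields; the
+ * hypervisor-owned fields remain under the L0's control.)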
+ */ + host_psscr = mfspr(SPRN_PSSCR_PR); + + kvmppc_msr_hard_disable_set_facilities(vcpu, msr); + if (lazy_irq_pending()) + return 0; + + if (unlikely(load_vcpu_state(vcpu, &host_os_sprs))) + msr = mfmsr(); /* TM restore can update msr */ + + if (vcpu->arch.psscr != host_psscr) + mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); + + kvmhv_save_hv_regs(vcpu, &hvregs); + hvregs.lpcr = lpcr; + hvregs.amor = ~0; + vcpu->arch.regs.msr = vcpu->arch.shregs.msr; + hvregs.version = HV_GUEST_STATE_VERSION; + if (vcpu->arch.nested) { + hvregs.lpid = vcpu->arch.nested->shadow_lpid; + hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; + } else { + hvregs.lpid = vcpu->kvm->arch.lpid; + hvregs.vcpu_token = vcpu->vcpu_id; + } + hvregs.hdec_expiry = time_limit; + + /* + * hvregs has the doorbell status, so zero it here which + * enables us to receive doorbells when H_ENTER_NESTED is + * in progress for this vCPU + */ + + if (vcpu->arch.doorbell_request) + vcpu->arch.doorbell_request = 0; + + /* + * When setting DEC, we must always deal with irq_work_raise + * via NMI vs setting DEC. The problem occurs right as we + * switch into guest mode if a NMI hits and sets pending work + * and sets DEC, then that will apply to the guest and not + * bring us back to the host. + * + * irq_work_raise could check a flag (or possibly LPCR[HDICE] + * for example) and set HDEC to 1? That wouldn't solve the + * nested hv case which needs to abort the hcall or zero the + * time limit. + * + * XXX: Another day's problem. + */ + mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb); + + mtspr(SPRN_DAR, vcpu->arch.shregs.dar); + mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); + switch_pmu_to_guest(vcpu, &host_os_sprs); + accumulate_time(vcpu, &vcpu->arch.in_guest); + trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs), + __pa(&vcpu->arch.regs)); + accumulate_time(vcpu, &vcpu->arch.guest_exit); + kvmhv_restore_hv_return_state(vcpu, &hvregs); + switch_pmu_to_host(vcpu, &host_os_sprs); + vcpu->arch.shregs.msr = vcpu->arch.regs.msr; + vcpu->arch.shregs.dar = mfspr(SPRN_DAR); + vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); + vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); + + store_vcpu_state(vcpu); + + dec = mfspr(SPRN_DEC); + if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ + dec = (s32) dec; + *tb = mftb(); + vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu)); + + timer_rearm_host_dec(*tb); + + restore_p9_host_os_sprs(vcpu, &host_os_sprs); + if (vcpu->arch.psscr != host_psscr) + mtspr(SPRN_PSSCR_PR, host_psscr); + + return trap; +} + +/* + * Guest entry for POWER9 and later CPUs. 
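+ * Handles both the nested cases (the nestedv1/v2 paths when running
+ * under a hypervisor) and bare-metal entry via kvmhv_vcpu_entry_p9().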
+ */ +static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr, u64 *tb) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *nested = vcpu->arch.nested; + u64 next_timer; + int trap; + + next_timer = timer_get_next_tb(); + if (*tb >= next_timer) + return BOOK3S_INTERRUPT_HV_DECREMENTER; + if (next_timer < time_limit) + time_limit = next_timer; + else if (*tb >= time_limit) /* nested time limit */ + return BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER; + + vcpu->arch.ceded = 0; + + vcpu_vpa_increment_dispatch(vcpu); + + if (kvmhv_on_pseries()) { + if (kvmhv_is_nestedv1()) + trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); + else + trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb); + + /* H_CEDE has to be handled now, not later */ + if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested && + kvmppc_get_gpr(vcpu, 3) == H_CEDE) { + kvmppc_cede(vcpu); + kvmppc_set_gpr(vcpu, 3, 0); + trap = 0; + } + + } else if (nested) { + __this_cpu_write(cpu_in_guest, kvm); + trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); + __this_cpu_write(cpu_in_guest, NULL); + + } else { + kvmppc_xive_push_vcpu(vcpu); + + __this_cpu_write(cpu_in_guest, kvm); + trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); + __this_cpu_write(cpu_in_guest, NULL); + + if (trap == BOOK3S_INTERRUPT_SYSCALL && + !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { + unsigned long req = kvmppc_get_gpr(vcpu, 3); + + /* + * XIVE rearm and XICS hcalls must be handled + * before xive context is pulled (is this + * true?) + */ + if (req == H_CEDE) { + /* H_CEDE has to be handled now */ + kvmppc_cede(vcpu); + if (!kvmppc_xive_rearm_escalation(vcpu)) { + /* + * Pending escalation so abort + * the cede. + */ + vcpu->arch.ceded = 0; + } + kvmppc_set_gpr(vcpu, 3, 0); + trap = 0; + + } else if (req == H_ENTER_NESTED) { + /* + * L2 should not run with the L1 + * context so rearm and pull it. + */ + if (!kvmppc_xive_rearm_escalation(vcpu)) { + /* + * Pending escalation so abort + * H_ENTER_NESTED. + */ + kvmppc_set_gpr(vcpu, 3, 0); + trap = 0; + } + + } else if (hcall_is_xics(req)) { + int ret; + + ret = kvmppc_xive_xics_hcall(vcpu, req); + if (ret != H_TOO_HARD) { + kvmppc_set_gpr(vcpu, 3, ret); + trap = 0; + } + } + } + kvmppc_xive_pull_vcpu(vcpu); + + if (kvm_is_radix(kvm)) + vcpu->arch.slb_max = 0; + } + + vcpu_vpa_increment_dispatch(vcpu); + + return trap; +} + /* * Wait for some other vcpu thread to execute us, and * wake us up when we need to handle something in the host. 
@@ -2902,11 +4534,12 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, static void grow_halt_poll_ns(struct kvmppc_vcore *vc) { - /* 10us base */ - if (vc->halt_poll_ns == 0 && halt_poll_ns_grow) - vc->halt_poll_ns = 10000; - else - vc->halt_poll_ns *= halt_poll_ns_grow; + if (!halt_poll_ns_grow) + return; + + vc->halt_poll_ns *= halt_poll_ns_grow; + if (vc->halt_poll_ns < halt_poll_ns_grow_start) + vc->halt_poll_ns = halt_poll_ns_grow_start; } static void shrink_halt_poll_ns(struct kvmppc_vcore *vc) @@ -2920,9 +4553,9 @@ static void shrink_halt_poll_ns(struct kvmppc_vcore *vc) #ifdef CONFIG_KVM_XICS static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) { - if (!xive_enabled()) + if (!xics_on_xive()) return false; - return vcpu->arch.xive_saved_state.pipr < + return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < vcpu->arch.xive_saved_state.cppr; } #else @@ -2941,6 +4574,13 @@ static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu) return false; } +static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu) +{ + if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) + return true; + return false; +} + /* * Check to see if any of the runnable vcpus on the vcore have pending * exceptions or are no longer ceded @@ -2951,7 +4591,7 @@ static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc) int i; for_each_runnable_thread(i, vcpu, vc) { - if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) + if (kvmppc_vcpu_check_block(vcpu)) return 1; } @@ -2967,13 +4607,14 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) ktime_t cur, start_poll, start_wait; int do_sleep = 1; u64 block_ns; - DECLARE_SWAITQUEUE(wait); + + WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)); /* Poll for pending exceptions and ceded state */ cur = start_poll = ktime_get(); if (vc->halt_poll_ns) { ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); - ++vc->runner->stat.halt_attempted_poll; + ++vc->runner->stat.generic.halt_attempted_poll; vc->vcore_state = VCORE_POLLING; spin_unlock(&vc->lock); @@ -2984,38 +4625,38 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) break; } cur = ktime_get(); - } while (single_task_running() && ktime_before(cur, stop)); + } while (kvm_vcpu_can_poll(cur, stop)); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; if (!do_sleep) { - ++vc->runner->stat.halt_successful_poll; + ++vc->runner->stat.generic.halt_successful_poll; goto out; } } - prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE); - + prepare_to_rcuwait(&vc->wait); + set_current_state(TASK_INTERRUPTIBLE); if (kvmppc_vcore_check_block(vc)) { - finish_swait(&vc->wq, &wait); + finish_rcuwait(&vc->wait); do_sleep = 0; /* If we polled, count this as a successful poll */ if (vc->halt_poll_ns) - ++vc->runner->stat.halt_successful_poll; + ++vc->runner->stat.generic.halt_successful_poll; goto out; } start_wait = ktime_get(); vc->vcore_state = VCORE_SLEEPING; - trace_kvmppc_vcore_blocked(vc, 0); + trace_kvmppc_vcore_blocked(vc->runner, 0); spin_unlock(&vc->lock); schedule(); - finish_swait(&vc->wq, &wait); + finish_rcuwait(&vc->wait); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; - trace_kvmppc_vcore_blocked(vc, 1); + trace_kvmppc_vcore_blocked(vc->runner, 1); ++vc->runner->stat.halt_successful_wait; cur = ktime_get(); @@ -3025,19 +4666,31 @@ out: /* Attribute wait time */ if (do_sleep) { - vc->runner->stat.halt_wait_ns += + vc->runner->stat.generic.halt_wait_ns += ktime_to_ns(cur) - ktime_to_ns(start_wait); + KVM_STATS_LOG_HIST_UPDATE( + vc->runner->stat.generic.halt_wait_hist, + 
ktime_to_ns(cur) - ktime_to_ns(start_wait)); /* Attribute failed poll time */ - if (vc->halt_poll_ns) - vc->runner->stat.halt_poll_fail_ns += + if (vc->halt_poll_ns) { + vc->runner->stat.generic.halt_poll_fail_ns += ktime_to_ns(start_wait) - ktime_to_ns(start_poll); + KVM_STATS_LOG_HIST_UPDATE( + vc->runner->stat.generic.halt_poll_fail_hist, + ktime_to_ns(start_wait) - + ktime_to_ns(start_poll)); + } } else { /* Attribute successful poll time */ - if (vc->halt_poll_ns) - vc->runner->stat.halt_poll_success_ns += + if (vc->halt_poll_ns) { + vc->runner->stat.generic.halt_poll_success_ns += ktime_to_ns(cur) - ktime_to_ns(start_poll); + KVM_STATS_LOG_HIST_UPDATE( + vc->runner->stat.generic.halt_poll_success_hist, + ktime_to_ns(cur) - ktime_to_ns(start_poll)); + } } /* Adjust poll time */ @@ -3059,15 +4712,39 @@ out: trace_kvmppc_vcore_wakeup(do_sleep, block_ns); } -static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +/* + * This never fails for a radix guest, as none of the operations it does + * for a radix guest can fail or have a way to report failure. + */ +static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) +{ + int r = 0; + struct kvm *kvm = vcpu->kvm; + + mutex_lock(&kvm->arch.mmu_setup_lock); + if (!kvm->arch.mmu_ready) { + if (!kvm_is_radix(kvm)) + r = kvmppc_hv_setup_htab_rma(vcpu); + if (!r) { + if (cpu_has_feature(CPU_FTR_ARCH_300)) + kvmppc_setup_partition_table(kvm); + kvm->arch.mmu_ready = 1; + } + } + mutex_unlock(&kvm->arch.mmu_setup_lock); + return r; +} + +static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) { - int n_ceded, i; + struct kvm_run *run = vcpu->run; + int n_ceded, i, r; struct kvmppc_vcore *vc; struct kvm_vcpu *v; trace_kvmppc_run_vcpu_enter(vcpu); - kvm_run->exit_reason = 0; + run->exit_reason = 0; vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; kvmppc_update_vpas(vcpu); @@ -3079,7 +4756,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) spin_lock(&vc->lock); vcpu->arch.ceded = 0; vcpu->arch.run_task = current; - vcpu->arch.kvm_run = kvm_run; vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; vcpu->arch.busy_preempt = TB_NIL; @@ -3092,29 +4768,34 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) * this thread straight away and have it join in. */ if (!signal_pending(current)) { - if (vc->vcore_state == VCORE_PIGGYBACK) { - if (spin_trylock(&vc->lock)) { - if (vc->vcore_state == VCORE_RUNNING && - !VCORE_IS_EXITING(vc)) { - kvmppc_create_dtl_entry(vcpu, vc); - kvmppc_start_thread(vcpu, vc); - trace_kvm_guest_enter(vcpu); - } - spin_unlock(&vc->lock); - } - } else if (vc->vcore_state == VCORE_RUNNING && + if ((vc->vcore_state == VCORE_PIGGYBACK || + vc->vcore_state == VCORE_RUNNING) && !VCORE_IS_EXITING(vc)) { - kvmppc_create_dtl_entry(vcpu, vc); + kvmppc_update_vpa_dispatch(vcpu, vc); kvmppc_start_thread(vcpu, vc); trace_kvm_guest_enter(vcpu); } else if (vc->vcore_state == VCORE_SLEEPING) { - swake_up(&vc->wq); + rcuwait_wake_up(&vc->wait); } } while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && !signal_pending(current)) { + /* See if the MMU is ready to go */ + if (!vcpu->kvm->arch.mmu_ready) { + spin_unlock(&vc->lock); + r = kvmhv_setup_mmu(vcpu); + spin_lock(&vc->lock); + if (r) { + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry. 
+ hardware_entry_failure_reason = 0; + vcpu->arch.ret = r; + break; + } + } + if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) kvmppc_vcore_end_preempt(vc); @@ -3125,9 +4806,9 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) for_each_runnable_thread(i, v, vc) { kvmppc_core_prepare_to_enter(v); if (signal_pending(v->arch.run_task)) { - kvmppc_remove_runnable(vc, v); + kvmppc_remove_runnable(vc, v, mftb()); v->stat.signal_exits++; - v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; + v->run->exit_reason = KVM_EXIT_INTR; v->arch.ret = -EINTR; wake_up(&v->arch.cpu_run); } @@ -3166,9 +4847,9 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvmppc_vcore_end_preempt(vc); if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { - kvmppc_remove_runnable(vc, vcpu); + kvmppc_remove_runnable(vc, vcpu, mftb()); vcpu->stat.signal_exits++; - kvm_run->exit_reason = KVM_EXIT_INTR; + run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; } @@ -3179,31 +4860,254 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) wake_up(&v->arch.cpu_run); } - trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); + trace_kvmppc_run_vcpu_exit(vcpu); spin_unlock(&vc->lock); return vcpu->arch.ret; } -static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) +int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr) +{ + struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); + struct kvm_run *run = vcpu->run; + int trap, r, pcpu; + int srcu_idx; + struct kvmppc_vcore *vc; + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *nested = vcpu->arch.nested; + unsigned long flags; + u64 tb; + + trace_kvmppc_run_vcpu_enter(vcpu); + + run->exit_reason = 0; + vcpu->arch.ret = RESUME_GUEST; + vcpu->arch.trap = 0; + + vc = vcpu->arch.vcore; + vcpu->arch.ceded = 0; + vcpu->arch.run_task = current; + vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; + + /* See if the MMU is ready to go */ + if (unlikely(!kvm->arch.mmu_ready)) { + r = kvmhv_setup_mmu(vcpu); + if (r) { + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry.hardware_entry_failure_reason = 0; + vcpu->arch.ret = r; + return r; + } + } + + if (need_resched()) + cond_resched(); + + kvmppc_update_vpas(vcpu); + + preempt_disable(); + pcpu = smp_processor_id(); + if (kvm_is_radix(kvm)) + kvmppc_prepare_radix_vcpu(vcpu, pcpu); + + /* flags save not required, but irq_pmu has no disable/enable API */ + powerpc_local_irq_pmu_save(flags); + + vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; + + if (signal_pending(current)) + goto sigpend; + if (need_resched() || !kvm->arch.mmu_ready) + goto out; + + vcpu->cpu = pcpu; + vcpu->arch.thread_cpu = pcpu; + vc->pcpu = pcpu; + local_paca->kvm_hstate.kvm_vcpu = vcpu; + local_paca->kvm_hstate.ptid = 0; + local_paca->kvm_hstate.fake_suspend = 0; + + /* + * Orders set cpu/thread_cpu vs testing for pending interrupts and + * doorbells below. The other side is when these fields are set vs + * kvmppc_fast_vcpu_kick_hv reading the cpu/thread_cpu fields to + * kick a vCPU to notice the pending interrupt. + */ + smp_mb(); + + if (!nested) { + kvmppc_core_prepare_to_enter(vcpu); + if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, + &vcpu->arch.pending_exceptions) || + xive_interrupt_pending(vcpu)) { + /* + * For nested HV, don't synthesize but always pass MER, + * the L0 will be able to optimise that more + * effectively than manipulating registers directly. 
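The comment above is the crux of the external-interrupt path: if the guest can take the interrupt immediately (MSR_EE set), it is injected right away; otherwise LPCR_MER is set so the guest traps the moment it re-enables interrupts. A minimal standalone C sketch of that decision, using the architected MSR_EE/LPCR_MER bit positions but otherwise invented names; this is an illustration, not the kernel's code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_EE   (1UL << 15)  /* guest external-interrupt enable */
#define LPCR_MER (1UL << 11)  /* mediated external request */

/* Returns the LPCR value to enter the guest with. */
static uint64_t prepare_entry(uint64_t msr, uint64_t lpcr, bool irq_pending)
{
	if (irq_pending) {
		if (msr & MSR_EE)
			printf("inject 0x500 external interrupt now\n");
		else
			lpcr |= LPCR_MER;  /* trap once the guest sets MSR_EE */
	} else {
		lpcr &= ~LPCR_MER;         /* avoid spurious MER interrupts */
	}
	return lpcr;
}

int main(void)
{
	printf("lpcr = %#llx\n",
	       (unsigned long long)prepare_entry(0, 0, true));
	return 0;
}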
+ */ + if (!kvmhv_on_pseries() && (__kvmppc_get_msr_hv(vcpu) & MSR_EE)) + kvmppc_inject_interrupt_hv(vcpu, + BOOK3S_INTERRUPT_EXTERNAL, 0); + else + lpcr |= LPCR_MER; + } else { + /* + * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit + * unexpectedly set - for e.g. during NMI handling when all register + * states are synchronized from L0 to L1. L1 needs to inform L0 about + * MER=1 only when there are pending external interrupts. + * In the above if check, MER bit is set if there are pending + * external interrupts. Hence, explicitly mask off MER bit + * here as otherwise it may generate spurious interrupts in L2 KVM + * causing an endless loop, which results in L2 guest getting hung. + */ + lpcr &= ~LPCR_MER; + } + } else if (vcpu->arch.pending_exceptions || + xive_interrupt_pending(vcpu)) { + vcpu->arch.ret = RESUME_HOST; + goto out; + } + + if (vcpu->arch.timer_running) { + hrtimer_try_to_cancel(&vcpu->arch.dec_timer); + vcpu->arch.timer_running = 0; + } + + tb = mftb(); + + kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + kvmppc_get_tb_offset(vcpu)); + + trace_kvm_guest_enter(vcpu); + + guest_timing_enter_irqoff(); + + srcu_idx = srcu_read_lock(&kvm->srcu); + + guest_state_enter_irqoff(); + this_cpu_disable_ftrace(); + + trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); + vcpu->arch.trap = trap; + + this_cpu_enable_ftrace(); + guest_state_exit_irqoff(); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + set_irq_happened(trap); + + vcpu->cpu = -1; + vcpu->arch.thread_cpu = -1; + vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; + + if (!vtime_accounting_enabled_this_cpu()) { + powerpc_local_irq_pmu_restore(flags); + /* + * Service IRQs here before guest_timing_exit_irqoff() so any + * ticks that occurred while running the guest are accounted to + * the guest. If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. 
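The accounting comment above encodes an ordering requirement that is easy to miss: with tick-based (non-vtime) accounting, interrupts must be allowed to run before guest timing context is exited, so ticks that arrived while the guest ran are charged to the guest rather than the host. A stub sketch of that ordering, with hypothetical helper names that only trace the sequence:

#include <stdbool.h>
#include <stdio.h>

static void irq_enable(void)  { printf("irqs on  (pending ticks -> guest)\n"); }
static void irq_disable(void) { printf("irqs off\n"); }
static void guest_timing_exit(void) { printf("guest timing context exited\n"); }

static void vcpu_exit_path(bool vtime_accounting)
{
	if (!vtime_accounting) {
		/* let queued timer interrupts run while still "in guest" */
		irq_enable();
		irq_disable();
	}
	guest_timing_exit();
	irq_enable();   /* final enable: overhead now charged to the host */
}

int main(void)
{
	vcpu_exit_path(false);
	return 0;
}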
+ */ + powerpc_local_irq_pmu_save(flags); + } + guest_timing_exit_irqoff(); + + powerpc_local_irq_pmu_restore(flags); + + preempt_enable(); + + /* + * cancel pending decrementer exception if DEC is now positive, or if + * entering a nested guest in which case the decrementer is now owned + * by L2 and the L1 decrementer is provided in hdec_expires + */ + if (kvmppc_core_pending_dec(vcpu) && + ((tb < kvmppc_dec_expires_host_tb(vcpu)) || + (trap == BOOK3S_INTERRUPT_SYSCALL && + kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED))) + kvmppc_core_dequeue_dec(vcpu); + + trace_kvm_guest_exit(vcpu); + r = RESUME_GUEST; + if (trap) { + if (!nested) + r = kvmppc_handle_exit_hv(vcpu, current); + else + r = kvmppc_handle_nested_exit(vcpu); + } + vcpu->arch.ret = r; + + if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) { + kvmppc_set_timer(vcpu); + + prepare_to_rcuwait(wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) { + vcpu->stat.signal_exits++; + run->exit_reason = KVM_EXIT_INTR; + vcpu->arch.ret = -EINTR; + break; + } + + if (kvmppc_vcpu_check_block(vcpu)) + break; + + trace_kvmppc_vcore_blocked(vcpu, 0); + schedule(); + trace_kvmppc_vcore_blocked(vcpu, 1); + } + finish_rcuwait(wait); + } + vcpu->arch.ceded = 0; + + done: + trace_kvmppc_run_vcpu_exit(vcpu); + + return vcpu->arch.ret; + + sigpend: + vcpu->stat.signal_exits++; + run->exit_reason = KVM_EXIT_INTR; + vcpu->arch.ret = -EINTR; + out: + vcpu->cpu = -1; + vcpu->arch.thread_cpu = -1; + vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; + powerpc_local_irq_pmu_restore(flags); + preempt_enable(); + goto done; +} + +static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; int r; int srcu_idx; - unsigned long ebb_regs[3] = {}; /* shut up GCC */ - unsigned long user_tar = 0; - unsigned int user_vrsave; + struct kvm *kvm; + unsigned long msr; + + start_timing(vcpu, &vcpu->arch.vcpu_entry); if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return -EINVAL; } + /* No need to go into the guest when all we'll do is come back out */ + if (signal_pending(current)) { + run->exit_reason = KVM_EXIT_INTR; + return -EINTR; + } + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Don't allow entry with a suspended transaction, because * the guest entry/exit code will lose it. - * If the guest has TM enabled, save away their TM-related SPRs - * (they will get restored by the TM unavailable interrupt). */ -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && (current->thread.regs->msr & MSR_TM)) { if (MSR_TM_ACTIVE(current->thread.regs->msr)) { @@ -3211,104 +5115,113 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) run->fail_entry.hardware_entry_failure_reason = 0; return -EINVAL; } - /* Enable TM so we can read the TM SPRs */ - mtmsr(mfmsr() | MSR_TM); - current->thread.tm_tfhar = mfspr(SPRN_TFHAR); - current->thread.tm_tfiar = mfspr(SPRN_TFIAR); - current->thread.tm_texasr = mfspr(SPRN_TEXASR); - current->thread.regs->msr &= ~MSR_TM; } #endif - kvmppc_core_prepare_to_enter(vcpu); - - /* No need to go into the guest when all we'll do is come back out */ - if (signal_pending(current)) { - run->exit_reason = KVM_EXIT_INTR; - return -EINTR; + /* + * Force online to 1 for the sake of old userspace which doesn't + * set it. + */ + if (!vcpu->arch.online) { + atomic_inc(&vcpu->arch.vcore->online_count); + vcpu->arch.online = 1; } - atomic_inc(&vcpu->kvm->arch.vcpus_running); - /* Order vcpus_running vs. 
hpte_setup_done, see kvmppc_alloc_reset_hpt */ + kvmppc_core_prepare_to_enter(vcpu); + + kvm = vcpu->kvm; + atomic_inc(&kvm->arch.vcpus_running); + /* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */ smp_mb(); - /* On the first time here, set up HTAB and VRMA */ - if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) { - r = kvmppc_hv_setup_htab_rma(vcpu); - if (r) - goto out; - } + msr = 0; + if (IS_ENABLED(CONFIG_PPC_FPU)) + msr |= MSR_FP; + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + msr |= MSR_VEC; + if (cpu_has_feature(CPU_FTR_VSX)) + msr |= MSR_VSX; + if ((cpu_has_feature(CPU_FTR_TM) || + cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) && + (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM)) + msr |= MSR_TM; + msr = msr_check_and_set(msr); - flush_all_to_thread(current); + kvmppc_save_user_regs(); - /* Save userspace EBB and other register values */ - if (cpu_has_feature(CPU_FTR_ARCH_207S)) { - ebb_regs[0] = mfspr(SPRN_EBBHR); - ebb_regs[1] = mfspr(SPRN_EBBRR); - ebb_regs[2] = mfspr(SPRN_BESCR); - user_tar = mfspr(SPRN_TAR); - } - user_vrsave = mfspr(SPRN_VRSAVE); + kvmppc_save_current_sprs(); - vcpu->arch.wqp = &vcpu->arch.vcore->wq; - vcpu->arch.pgdir = current->mm->pgd; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + vcpu->arch.waitp = &vcpu->arch.vcore->wait; + vcpu->arch.pgdir = kvm->mm->pgd; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; do { - r = kvmppc_run_vcpu(run, vcpu); - - if (run->exit_reason == KVM_EXIT_PAPR_HCALL && - !(vcpu->arch.shregs.msr & MSR_PR)) { + accumulate_time(vcpu, &vcpu->arch.guest_entry); + if (cpu_has_feature(CPU_FTR_ARCH_300)) + r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, + vcpu->arch.vcore->lpcr); + else + r = kvmppc_run_vcpu(vcpu); + + if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { + accumulate_time(vcpu, &vcpu->arch.hcall); + + if (!kvmhv_is_nestedv2() && WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { + /* + * These should have been caught reflected + * into the guest by now. Final sanity check: + * don't allow userspace to execute hcalls in + * the hypervisor. 
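The do-while around kvmhv_run_single_vcpu()/kvmppc_run_vcpu() above is a classic exit-dispatch loop: keep re-entering the guest while the handlers return a resume-guest verdict, servicing hypercalls and page faults in between. A reduced standalone sketch of that shape, with invented exit codes and stub handlers rather than the kernel's:

#include <stdio.h>

enum exit_kind { EXIT_HCALL, EXIT_PAGE_FAULT, EXIT_DONE };
enum action { RESUME_GUEST, RESUME_HOST };

/* Fake guest entry: alternates exit reasons until the budget runs out. */
static enum exit_kind enter_guest(int *budget)
{
	if ((*budget)-- <= 0)
		return EXIT_DONE;
	return (*budget % 2) ? EXIT_HCALL : EXIT_PAGE_FAULT;
}

static enum action run_loop(void)
{
	int budget = 4;
	enum action r = RESUME_GUEST;

	do {
		switch (enter_guest(&budget)) {
		case EXIT_HCALL:
			printf("handle hypercall, then re-enter\n");
			break;
		case EXIT_PAGE_FAULT:
			printf("handle page fault, then re-enter\n");
			break;
		case EXIT_DONE:
			r = RESUME_HOST;
			break;
		}
	} while (r == RESUME_GUEST);
	return r;
}

int main(void) { run_loop(); return 0; }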
+ */ + r = RESUME_GUEST; + continue; + } trace_kvm_hcall_enter(vcpu); r = kvmppc_pseries_do_hcall(vcpu); trace_kvm_hcall_exit(vcpu, r); kvmppc_core_prepare_to_enter(vcpu); } else if (r == RESUME_PAGE_FAULT) { - srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvmppc_book3s_hv_page_fault(run, vcpu, + accumulate_time(vcpu, &vcpu->arch.pg_fault); + srcu_idx = srcu_read_lock(&kvm->srcu); + r = kvmppc_book3s_hv_page_fault(vcpu, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); - srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + srcu_read_unlock(&kvm->srcu, srcu_idx); } else if (r == RESUME_PASSTHROUGH) { - if (WARN_ON(xive_enabled())) + if (WARN_ON(xics_on_xive())) r = H_SUCCESS; else r = kvmppc_xics_rm_complete(vcpu, 0); } } while (is_kvmppc_resume_guest(r)); + accumulate_time(vcpu, &vcpu->arch.vcpu_exit); - /* Restore userspace EBB and other register values */ - if (cpu_has_feature(CPU_FTR_ARCH_207S)) { - mtspr(SPRN_EBBHR, ebb_regs[0]); - mtspr(SPRN_EBBRR, ebb_regs[1]); - mtspr(SPRN_BESCR, ebb_regs[2]); - mtspr(SPRN_TAR, user_tar); - mtspr(SPRN_FSCR, current->thread.fscr); - } - mtspr(SPRN_VRSAVE, user_vrsave); - - out: vcpu->arch.state = KVMPPC_VCPU_NOTREADY; - atomic_dec(&vcpu->kvm->arch.vcpus_running); + atomic_dec(&kvm->arch.vcpus_running); + + srr_regs_clobbered(); + + end_timing(vcpu); + return r; } static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, - int linux_psize) + int shift, int sllp) { - struct mmu_psize_def *def = &mmu_psize_defs[linux_psize]; - - if (!def->shift) - return; - (*sps)->page_shift = def->shift; - (*sps)->slb_enc = def->sllp; - (*sps)->enc[0].page_shift = def->shift; - (*sps)->enc[0].pte_enc = def->penc[linux_psize]; + (*sps)->page_shift = shift; + (*sps)->slb_enc = sllp; + (*sps)->enc[0].page_shift = shift; + (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift); /* - * Add 16MB MPSS support if host supports it + * Add 16MB MPSS support (may get filtered out by userspace) */ - if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) { - (*sps)->enc[1].page_shift = 24; - (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M]; + if (shift != 24) { + int penc = kvmppc_pgsize_lp_encoding(shift, 24); + if (penc != -1) { + (*sps)->enc[1].page_shift = 24; + (*sps)->enc[1].pte_enc = penc; + } } (*sps)++; } @@ -3319,22 +5232,26 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, struct kvm_ppc_one_seg_page_size *sps; /* - * Since we don't yet support HPT guests on a radix host, - * return an error if the host uses radix. + * POWER7, POWER8 and POWER9 all support 32 storage keys for data. + * POWER7 doesn't support keys for instruction accesses, + * POWER8 and POWER9 do. */ - if (radix_enabled()) - return -EINVAL; + info->data_keys = 32; + info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 
32 : 0;

- info->flags = KVM_PPC_PAGE_SIZES_REAL;
- if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
- info->flags |= KVM_PPC_1T_SEGMENTS;
- info->slb_size = mmu_slb_size;
+ /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */
+ info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS;
+ info->slb_size = 32;

 /* We only support these sizes for now, and no multi-size segments */
 sps = &info->sps[0];
- kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
- kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
- kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
+ kvmppc_add_seg_page_size(&sps, 12, 0);
+ kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01);
+ kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L);
+
+ /* If running as a nested hypervisor, we don't support HPT guests */
+ if (kvmhv_on_pseries())
+ info->flags |= KVM_PPC_NO_HASH;

 return 0;
 }
@@ -3347,9 +5264,9 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 {
 struct kvm_memslots *slots;
 struct kvm_memory_slot *memslot;
- int i, r;
- unsigned long n;
- unsigned long *buf;
+ int r;
+ unsigned long n, i;
+ unsigned long *buf, *p;
 struct kvm_vcpu *vcpu;

 mutex_lock(&kvm->slots_lock);
@@ -3361,12 +5278,12 @@
 slots = kvm_memslots(kvm);
 memslot = id_to_memslot(slots, log->slot);
 r = -ENOENT;
- if (!memslot->dirty_bitmap)
+ if (!memslot || !memslot->dirty_bitmap)
 goto out;

 /*
- * Use second half of bitmap area because radix accumulates
- * bits in the first half.
+ * Use second half of bitmap area because both HPT and radix
+ * accumulate bits in the first half.
 */
 n = kvm_dirty_bitmap_bytes(memslot);
 buf = memslot->dirty_bitmap + n / sizeof(long);
@@ -3379,6 +5296,16 @@
 if (r)
 goto out;

+ /*
+ * We accumulate dirty bits in the first half of the
+ * memslot's dirty_bitmap area, for when pages are paged
+ * out or modified by the host directly. Pick up these
+ * bits and add them to the map.
+ */
+ p = memslot->dirty_bitmap;
+ for (i = 0; i < n / sizeof(long); ++i)
+ buf[i] |= xchg(&p[i], 0);
+
 /* Harvest dirty bits from VPA and DTL updates */
 /* Note: we never modify the SLB shadow buffer areas */
 kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -3398,75 +5325,90 @@ out:
 return r;
 }

-static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
 {
- if (!dont || free->arch.rmap != dont->arch.rmap) {
- vfree(free->arch.rmap);
- free->arch.rmap = NULL;
- }
+ vfree(slot->arch.rmap);
+ slot->arch.rmap = NULL;
 }

-static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
- unsigned long npages)
+static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
 {
- /*
- * For now, if radix_enabled() then we only support radix guests,
- * and in that case we don't need the rmap array. 
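In the dirty-log hunk above, bits accumulated in the first half of the bitmap (pages dirtied behind the guest's back by the host) are folded into the user-visible half with xchg(), an atomic fetch-and-clear, so a concurrently set bit can never be lost. A minimal sketch of the same merge, with C11 atomics standing in for the kernel's xchg():

#include <stdatomic.h>
#include <stdio.h>

#define WORDS 4

static _Atomic unsigned long host_dirty[WORDS]; /* "first half" */
static unsigned long user_buf[WORDS];           /* "second half" */

static void harvest_dirty_bits(void)
{
	for (int i = 0; i < WORDS; i++)
		/* atomically take the whole word, leaving zero behind */
		user_buf[i] |= atomic_exchange(&host_dirty[i], 0);
}

int main(void)
{
	atomic_store(&host_dirty[1], 0x5UL);
	harvest_dirty_bits();
	printf("user_buf[1] = %#lx\n", user_buf[1]);
	return 0;
}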
- */ - if (radix_enabled()) { - slot->arch.rmap = NULL; - return 0; - } + if (change == KVM_MR_CREATE) { + unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap)); - slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); - if (!slot->arch.rmap) - return -ENOMEM; + if ((size >> PAGE_SHIFT) > totalram_pages()) + return -ENOMEM; - return 0; -} + new->arch.rmap = vzalloc(size); + if (!new->arch.rmap) + return -ENOMEM; + } else if (change != KVM_MR_DELETE) { + new->arch.rmap = old->arch.rmap; + } -static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, - struct kvm_memory_slot *memslot, - const struct kvm_userspace_memory_region *mem) -{ return 0; } static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, - const struct kvm_memory_slot *new) + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) { - unsigned long npages = mem->memory_size >> PAGE_SHIFT; - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - /* - * If we are making a new memslot, it might make + * If we are creating or modifying a memslot, it might make * some address that was previously cached as emulated * MMIO be no longer emulated MMIO, so invalidate * all the caches of emulated MMIO translations. */ - if (npages) + if (change != KVM_MR_DELETE) atomic64_inc(&kvm->arch.mmio_update); - if (npages && old->npages && !kvm_is_radix(kvm)) { + /* + * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels + * have already called kvm_arch_flush_shadow_memslot() to + * flush shadow mappings. For KVM_MR_CREATE we have no + * previous mappings. So the only case to handle is + * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit + * has been changed. + * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES + * to get rid of any THP PTEs in the partition-scoped page tables + * so we can track dirtiness at the page level; we flush when + * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to + * using THP PTEs. + */ + if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) && + ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) + kvmppc_radix_flush_memslot(kvm, old); + /* + * If UV hasn't yet called H_SVM_INIT_START, don't register memslots. + */ + if (!kvm->arch.secure_guest) + return; + + switch (change) { + case KVM_MR_CREATE: /* - * If modifying a memslot, reset all the rmap dirty bits. - * If this is a new memslot, we don't need to do anything - * since the rmap array starts out as all zeroes, - * i.e. no pages are dirty. + * @TODO kvmppc_uvmem_memslot_create() can fail and + * return error. Fix this. */ - slots = kvm_memslots(kvm); - memslot = id_to_memslot(slots, mem->slot); - kvmppc_hv_get_dirty_log_hpt(kvm, memslot, NULL); + kvmppc_uvmem_memslot_create(kvm, new); + break; + case KVM_MR_DELETE: + kvmppc_uvmem_memslot_delete(kvm, old); + break; + default: + /* TODO: Handle KVM_MR_MOVE */ + break; } } /* * Update LPCR values in kvm->arch and in vcores. - * Caller must hold kvm->lock. + * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion + * of kvm->arch.lpcr update). 
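kvmppc_update_lpcr(), whose body follows, applies a masked read-modify-write per vcore: only the bits selected by the mask change, everything else is preserved. A tiny sketch of that update rule, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Change only the bits covered by mask; leave the rest of old alone. */
static uint64_t lpcr_update(uint64_t old, uint64_t bits, uint64_t mask)
{
	return (old & ~mask) | (bits & mask);
}

int main(void)
{
	uint64_t lpcr = 0xf0f0;

	/* set 0x0100 within mask 0x0f00, clearing the rest of the field */
	lpcr = lpcr_update(lpcr, 0x0100, 0x0f00);
	printf("lpcr = %#llx\n", (unsigned long long)lpcr);  /* 0xf1f0 */
	return 0;
}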
*/ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) { @@ -3482,20 +5424,25 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) struct kvmppc_vcore *vc = kvm->arch.vcores[i]; if (!vc) continue; + spin_lock(&vc->lock); vc->lpcr = (vc->lpcr & ~mask) | lpcr; + verify_lpcr(kvm, vc->lpcr); spin_unlock(&vc->lock); if (++cores_done >= kvm->arch.online_vcores) break; } -} -static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) -{ - return; + if (kvmhv_is_nestedv2()) { + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR); + } + } } -static void kvmppc_setup_partition_table(struct kvm *kvm) +void kvmppc_setup_partition_table(struct kvm *kvm) { unsigned long dw0, dw1; @@ -3513,10 +5460,13 @@ static void kvmppc_setup_partition_table(struct kvm *kvm) __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; dw1 = PATB_GR | kvm->arch.process_table; } - - mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1); + kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); } +/* + * Set up HPT (hashed page table) and RMA (real-mode area). + * Must be called with kvm->arch.mmu_setup_lock held. + */ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) { int err = 0; @@ -3528,10 +5478,6 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) unsigned long psize, porder; int srcu_idx; - mutex_lock(&kvm->lock); - if (kvm->arch.hpte_setup_done) - goto out; /* another vcpu beat us to it */ - /* Allocate hashed page table (if not done already) and reset it */ if (!kvm->arch.hpt.virt) { int order = KVM_DEFAULT_HPT_ORDER; @@ -3563,21 +5509,23 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Look up the VMA for the start of this memory slot */ hva = memslot->userspace_addr; - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, hva); - if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) + mmap_read_lock(kvm->mm); + vma = vma_lookup(kvm->mm, hva); + if (!vma || (vma->vm_flags & VM_IO)) goto up_out; psize = vma_kernel_pagesize(vma); - porder = __ilog2(psize); - up_read(¤t->mm->mmap_sem); + mmap_read_unlock(kvm->mm); /* We can handle 4k, 64k or 16M pages in the VRMA */ - err = -EINVAL; - if (!(psize == 0x1000 || psize == 0x10000 || - psize == 0x1000000)) - goto out_srcu; + if (psize >= 0x1000000) + psize = 0x1000000; + else if (psize >= 0x10000) + psize = 0x10000; + else + psize = 0x1000; + porder = __ilog2(psize); senc = slb_pgsize_encoding(psize); kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | @@ -3590,25 +5538,80 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* the -4 is to account for senc values starting at 0x10 */ lpcr = senc << (LPCR_VRMASD_SH - 4); kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); - } else { - kvmppc_setup_partition_table(kvm); } - /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */ + /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ smp_wmb(); - kvm->arch.hpte_setup_done = 1; err = 0; out_srcu: srcu_read_unlock(&kvm->srcu, srcu_idx); out: - mutex_unlock(&kvm->lock); return err; up_out: - up_read(¤t->mm->mmap_sem); + mmap_read_unlock(kvm->mm); goto out_srcu; } +/* + * Must be called with kvm->arch.mmu_setup_lock held and + * mmu_ready = 0 and no vcpus running. + */ +int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) +{ + unsigned long lpcr, lpcr_mask; + + if (nesting_enabled(kvm)) + kvmhv_release_all_nested(kvm); + kvmppc_rmap_reset(kvm); + kvm->arch.process_table = 0; + /* Mutual exclusion with kvm_unmap_gfn_range etc. 
*/ + spin_lock(&kvm->mmu_lock); + kvm->arch.radix = 0; + spin_unlock(&kvm->mmu_lock); + kvmppc_free_radix(kvm); + + lpcr = LPCR_VPM1; + lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR; + if (cpu_has_feature(CPU_FTR_ARCH_31)) + lpcr_mask |= LPCR_HAIL; + kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); + + return 0; +} + +/* + * Must be called with kvm->arch.mmu_setup_lock held and + * mmu_ready = 0 and no vcpus running. + */ +int kvmppc_switch_mmu_to_radix(struct kvm *kvm) +{ + unsigned long lpcr, lpcr_mask; + int err; + + err = kvmppc_init_vm_radix(kvm); + if (err) + return err; + kvmppc_rmap_reset(kvm); + /* Mutual exclusion with kvm_unmap_gfn_range etc. */ + spin_lock(&kvm->mmu_lock); + kvm->arch.radix = 1; + spin_unlock(&kvm->mmu_lock); + kvmppc_free_hpt(&kvm->arch.hpt); + + lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR; + lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR; + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + lpcr_mask |= LPCR_HAIL; + if (cpu_has_feature(CPU_FTR_HVMODE) && + (kvm->arch.host_lpcr & LPCR_HAIL)) + lpcr |= LPCR_HAIL; + } + kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); + + return 0; +} + #ifdef CONFIG_KVM_XICS /* * Allocate a per-core structure for managing state about which cores are @@ -3627,6 +5630,9 @@ void kvmppc_alloc_host_rm_ops(void) int cpu, core; int size; + if (cpu_has_feature(CPU_FTR_ARCH_300)) + return; + /* Not the first time here ? */ if (kvmppc_host_rm_ops_hv != NULL) return; @@ -3692,18 +5698,51 @@ void kvmppc_free_host_rm_ops(void) static int kvmppc_core_init_vm_hv(struct kvm *kvm) { unsigned long lpcr, lpid; - char buf[32]; int ret; + mutex_init(&kvm->arch.uvmem_lock); + INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); + mutex_init(&kvm->arch.mmu_setup_lock); + /* Allocate the guest's logical partition ID */ - lpid = kvmppc_alloc_lpid(); - if ((long)lpid < 0) - return -ENOMEM; - kvm->arch.lpid = lpid; + if (!kvmhv_is_nestedv2()) { + lpid = kvmppc_alloc_lpid(); + if ((long)lpid < 0) + return -ENOMEM; + kvm->arch.lpid = lpid; + } kvmppc_alloc_host_rm_ops(); + kvmhv_vm_nested_init(kvm); + + if (kvmhv_is_nestedv2()) { + long rc; + unsigned long guest_id; + + rc = plpar_guest_create(0, &guest_id); + + if (rc != H_SUCCESS) + pr_err("KVM: Create Guest hcall failed, rc=%ld\n", rc); + + switch (rc) { + case H_PARAMETER: + case H_FUNCTION: + case H_STATE: + return -EINVAL; + case H_NOT_ENOUGH_RESOURCES: + case H_ABORTED: + return -ENOMEM; + case H_AUTHORITY: + return -EPERM; + case H_NOT_AVAILABLE: + return -EBUSY; + } + kvm->arch.lpid = guest_id; + } + + /* * Since we don't flush the TLB when tearing down a VM, * and this lpid might have previously been used, @@ -3722,9 +5761,17 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); /* Init LPCR for virtual RMA mode */ - kvm->arch.host_lpid = mfspr(SPRN_LPID); - kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); - lpcr &= LPCR_PECE | LPCR_LPES; + if (cpu_has_feature(CPU_FTR_HVMODE)) { + kvm->arch.host_lpid = mfspr(SPRN_LPID); + kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); + lpcr &= LPCR_PECE | LPCR_LPES; + } else { + /* + * The L2 LPES mode will be set by the L0 according to whether + * or not it needs to take external interrupts in HV mode. + */ + lpcr = 0; + } lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | LPCR_VPM0 | LPCR_VPM1; kvm->arch.vrma_slb_v = SLB_VSID_B_1T | @@ -3747,25 +5794,34 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) * If xive is enabled, we route 0x500 interrupts directly * to the guest. 
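Earlier in this hunk, the return code of plpar_guest_create() is collapsed into a conventional errno for the rest of the stack. A standalone sketch of that mapping style; the H_* values here are invented placeholders, not the PAPR-defined codes:

#include <errno.h>
#include <stdio.h>

enum { H_SUCCESS = 0, H_PARAMETER, H_FUNCTION, H_STATE,
       H_NOT_ENOUGH_RESOURCES, H_ABORTED, H_AUTHORITY, H_NOT_AVAILABLE };

static int hcall_rc_to_errno(long rc)
{
	switch (rc) {
	case H_SUCCESS:
		return 0;
	case H_PARAMETER:
	case H_FUNCTION:
	case H_STATE:
		return -EINVAL;
	case H_NOT_ENOUGH_RESOURCES:
	case H_ABORTED:
		return -ENOMEM;
	case H_AUTHORITY:
		return -EPERM;
	case H_NOT_AVAILABLE:
		return -EBUSY;
	default:
		return -EIO;   /* anything unrecognised */
	}
}

int main(void)
{
	printf("H_AUTHORITY -> %d\n", hcall_rc_to_errno(H_AUTHORITY));
	return 0;
}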
*/
- if (xive_enabled())
+ if (xics_on_xive())
 lpcr |= LPCR_LPES;
 }

 /*
- * For now, if the host uses radix, the guest must be radix.
+ * If the host uses radix, the guest starts out as radix.
 */
 if (radix_enabled()) {
 kvm->arch.radix = 1;
+ kvm->arch.mmu_ready = 1;
 lpcr &= ~LPCR_VPM1;
 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_31) &&
+ (kvm->arch.host_lpcr & LPCR_HAIL))
+ lpcr |= LPCR_HAIL;
 ret = kvmppc_init_vm_radix(kvm);
 if (ret) {
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
 return ret;
 }
 kvmppc_setup_partition_table(kvm);
 }

+ verify_lpcr(kvm, lpcr);
 kvm->arch.lpcr = lpcr;

 /* Initialization for future HPT resizes */
@@ -3775,7 +5831,12 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 * Work out how many sets the TLB has, for the use of
 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
 */
- if (kvm_is_radix(kvm))
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ /*
+ * P10 will flush all congruence classes with a single tlbiel
+ */
+ kvm->arch.tlb_sets = 1;
+ } else if (radix_enabled())
 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */
 else if (cpu_has_feature(CPU_FTR_ARCH_300))
 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
@@ -3787,8 +5848,6 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 /*
 * Track that we now have a HV mode VM active. This blocks secondary
 * CPU threads from coming online.
- * On POWER9, we only need to do this for HPT guests on a radix
- * host, which is not yet supported.
 */
 if (!cpu_has_feature(CPU_FTR_ARCH_300))
 kvm_hv_vm_activated();
@@ -3806,14 +5865,14 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 kvm->arch.smt_mode = 1;
 kvm->arch.emul_smt_mode = 1;

- /*
- * Create a debugfs directory for the VM
- */
- snprintf(buf, sizeof(buf), "vm%d", current->pid);
- kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
- if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
- kvmppc_mmu_debugfs_init(kvm);
+ return 0;
+}

+static int kvmppc_arch_create_vm_debugfs_hv(struct kvm *kvm)
+{
+ kvmppc_mmu_debugfs_init(kvm);
+ if (radix_enabled())
+ kvmhv_radix_debugfs_init(kvm);
 return 0;
 }
@@ -3828,25 +5887,40 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 {
- debugfs_remove_recursive(kvm->arch.debugfs_dir);
-
 if (!cpu_has_feature(CPU_FTR_ARCH_300))
 kvm_hv_vm_deactivated();

 kvmppc_free_vcores(kvm);

- kvmppc_free_lpid(kvm->arch.lpid);

 if (kvm_is_radix(kvm))
 kvmppc_free_radix(kvm);
 else
 kvmppc_free_hpt(&kvm->arch.hpt);

+ /* Perform global invalidation and return lpid to the pool */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (nesting_enabled(kvm))
+ kvmhv_release_all_nested(kvm);
+ kvm->arch.process_table = 0;
+ if (kvm->arch.secure_guest)
+ uv_svm_terminate(kvm->arch.lpid);
+ if (!kvmhv_is_nestedv2())
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
+ }
+
+ if (kvmhv_is_nestedv2()) {
+ kvmhv_flush_lpid(kvm->arch.lpid);
+ plpar_guest_delete(0, kvm->arch.lpid);
+ } else {
+ kvmppc_free_lpid(kvm->arch.lpid);
+ }
+
 kvmppc_free_pimap(kvm);
 }

 /* We don't need to emulate any privileged instructions or dcbz */
-static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
 unsigned int inst, int *advance)
 {
 return EMULATE_FAIL;
@@ -3866,11 +5940,15 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
 static int 
kvmppc_core_check_processor_compat_hv(void) { - if (!cpu_has_feature(CPU_FTR_HVMODE) || - !cpu_has_feature(CPU_FTR_ARCH_206)) - return -EIO; + if (cpu_has_feature(CPU_FTR_HVMODE) && + cpu_has_feature(CPU_FTR_ARCH_206)) + return 0; - return 0; + /* POWER9 in radix mode is capable of being a nested hypervisor. */ + if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) + return 0; + + return -EIO; } #ifdef CONFIG_KVM_XICS @@ -3892,6 +5970,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) struct kvmppc_passthru_irqmap *pimap; struct irq_chip *chip; int i, rc = 0; + struct irq_data *host_data; if (!kvm_irq_bypass) return 1; @@ -3919,7 +5998,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) * what our real-mode EOI code does, or a XIVE interrupt */ chip = irq_data_get_irq_chip(&desc->irq_data); - if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) { + if (!chip || !is_pnv_opal_msi(chip)) { pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n", host_irq, guest_gsi); mutex_unlock(&kvm->lock); @@ -3956,15 +6035,22 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) * the KVM real mode handler. */ smp_wmb(); - irq_map->r_hwirq = desc->irq_data.hwirq; + + /* + * The 'host_irq' number is mapped in the PCI-MSI domain but + * the underlying calls, which will EOI the interrupt in real + * mode, need an HW IRQ number mapped in the XICS IRQ domain. + */ + host_data = irq_domain_get_irq_data(irq_get_default_domain(), host_irq); + irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data); if (i == pimap->n_mapped) pimap->n_mapped++; - if (xive_enabled()) - rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc); + if (xics_on_xive()) + rc = kvmppc_xive_set_mapped(kvm, guest_gsi, host_irq); else - kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); + kvmppc_xics_set_mapped(kvm, guest_gsi, irq_map->r_hwirq); if (rc) irq_map->r_hwirq = 0; @@ -4002,12 +6088,12 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) return -ENODEV; } - if (xive_enabled()) - rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc); + if (xics_on_xive()) + rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, host_irq); else kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); - /* invalidate the entry (what do do on error from the above ?) */ + /* invalidate the entry (what to do on error from the above ?) 
*/ pimap->mapped[i].r_hwirq = 0; /* @@ -4057,18 +6143,24 @@ static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons, } #endif -static long kvm_arch_vm_ioctl_hv(struct file *filp, - unsigned int ioctl, unsigned long arg) +static int kvm_arch_vm_ioctl_hv(struct file *filp, + unsigned int ioctl, unsigned long arg) { struct kvm *kvm __maybe_unused = filp->private_data; void __user *argp = (void __user *)arg; - long r; + int r; switch (ioctl) { case KVM_PPC_ALLOCATE_HTAB: { u32 htab_order; + /* If we're a nested hypervisor, we currently only support radix */ + if (kvmhv_on_pseries()) { + r = -EOPNOTSUPP; + break; + } + r = -EFAULT; if (get_user(htab_order, (u32 __user *)argp)) break; @@ -4130,8 +6222,10 @@ static unsigned int default_hcall_list[] = { H_READ, H_PROTECT, H_BULK_REMOVE, +#ifdef CONFIG_SPAPR_TCE_IOMMU H_GET_TCE, H_PUT_TCE, +#endif H_SET_DABR, H_SET_XDABR, H_CEDE, @@ -4165,6 +6259,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) { unsigned long lpcr; int radix; + int err; /* If not on a POWER9, reject it */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) @@ -4174,12 +6269,8 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE)) return -EINVAL; - /* We can't change a guest to/from radix yet */ - radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); - if (radix != kvm_is_radix(kvm)) - return -EINVAL; - /* GR (guest radix) bit in process_table field must match */ + radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); if (!!(cfg->process_table & PATB_GR) != radix) return -EINVAL; @@ -4187,15 +6278,239 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) if ((cfg->process_table & PRTS_MASK) > 24) return -EINVAL; + /* We can change a guest to/from radix now, if the host is radix */ + if (radix && !radix_enabled()) + return -EINVAL; + + /* If we're a nested hypervisor, we currently only support radix */ + if (kvmhv_on_pseries() && !radix) + return -EINVAL; + + mutex_lock(&kvm->arch.mmu_setup_lock); + if (radix != kvm_is_radix(kvm)) { + if (kvm->arch.mmu_ready) { + kvm->arch.mmu_ready = 0; + /* order mmu_ready vs. vcpus_running */ + smp_mb(); + if (atomic_read(&kvm->arch.vcpus_running)) { + kvm->arch.mmu_ready = 1; + err = -EBUSY; + goto out_unlock; + } + } + if (radix) + err = kvmppc_switch_mmu_to_radix(kvm); + else + err = kvmppc_switch_mmu_to_hpt(kvm); + if (err) + goto out_unlock; + } + kvm->arch.process_table = cfg->process_table; kvmppc_setup_partition_table(kvm); lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? 
LPCR_GTSE : 0;
 kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
+ err = 0;
+
+ out_unlock:
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
+ return err;
+}
+
+static int kvmhv_enable_nested(struct kvm *kvm)
+{
+ if (!nested)
+ return -EPERM;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+ if (!radix_enabled())
+ return -ENODEV;
+ if (kvmhv_is_nestedv2())
+ return -ENODEV;
+ /* kvm == NULL means the caller is testing if the capability exists */
+ if (kvm)
+ kvm->arch.nested_enable = true;
 return 0;
 }

+static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size)
+{
+ int rc = -EINVAL;
+
+ if (kvmhv_vcpu_is_radix(vcpu)) {
+ rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);
+
+ if (rc > 0)
+ rc = -EINVAL;
+ }
+
+ /* For now quadrants are the only way to access nested guest memory */
+ if (rc && vcpu->arch.nested)
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size)
+{
+ int rc = -EINVAL;
+
+ if (kvmhv_vcpu_is_radix(vcpu)) {
+ rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);
+
+ if (rc > 0)
+ rc = -EINVAL;
+ }
+
+ /* For now quadrants are the only way to access nested guest memory */
+ if (rc && vcpu->arch.nested)
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
+{
+ unpin_vpa(kvm, vpa);
+ vpa->gpa = 0;
+ vpa->pinned_addr = NULL;
+ vpa->dirty = false;
+ vpa->update_pending = 0;
+}
+
+/*
+ * Enable a guest to become a secure VM, or test whether
+ * that could be enabled.
+ * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
+ * tested (kvm == NULL) or enabled (kvm != NULL).
+ */
+static int kvmhv_enable_svm(struct kvm *kvm)
+{
+ if (!kvmppc_uvmem_available())
+ return -EINVAL;
+ if (kvm)
+ kvm->arch.svm_enabled = 1;
+ return 0;
+}
+
+/*
+ * IOCTL handler to turn off secure mode of guest
+ *
+ * - Release all device pages
+ * - Issue ucall to terminate the guest on the UV side
+ * - Unpin the VPA pages.
+ * - Reinit the partition scoped page tables
+ */
+static int kvmhv_svm_off(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int mmu_was_ready;
+ int srcu_idx;
+ int ret = 0;
+ unsigned long i;
+
+ if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
+ return ret;
+
+ mutex_lock(&kvm->arch.mmu_setup_lock);
+ mmu_was_ready = kvm->arch.mmu_ready;
+ if (kvm->arch.mmu_ready) {
+ kvm->arch.mmu_ready = 0;
+ /* order mmu_ready vs. vcpus_running */
+ smp_mb();
+ if (atomic_read(&kvm->arch.vcpus_running)) {
+ kvm->arch.mmu_ready = 1;
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
+ struct kvm_memory_slot *memslot;
+ struct kvm_memslots *slots = __kvm_memslots(kvm, i);
+ int bkt;
+
+ if (!slots)
+ continue;
+
+ kvm_for_each_memslot(memslot, bkt, slots) {
+ kvmppc_uvmem_drop_pages(memslot, kvm, true);
+ uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
+ }
+ }
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ ret = uv_svm_terminate(kvm->arch.lpid);
+ if (ret != U_SUCCESS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * When secure guest is reset, all the guest pages are sent
+ * to UV via UV_PAGE_IN before the non-boot vcpus get a
+ * chance to run and unpin their VPA pages. Unpinning of all
+ * VPA pages is done here explicitly so that VPA pages
+ * can be migrated to the secure side.
+ *
+ * This is required for the secure SMP guest to reboot
+ * correctly. 
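Both kvmhv_configure_mmu() above and kvmhv_svm_off() here use the same gate: clear mmu_ready, issue a full barrier, then check vcpus_running, restoring the flag and backing off with -EBUSY if a vcpu raced in. A sketch of that handshake, with C11 atomics standing in for smp_mb() and the kernel's fields:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int mmu_ready = 1;
static atomic_int vcpus_running;

static int begin_mmu_teardown(void)
{
	atomic_store(&mmu_ready, 0);
	/* order mmu_ready vs. vcpus_running, like the smp_mb() above */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&vcpus_running)) {
		atomic_store(&mmu_ready, 1);  /* a vcpu raced in: back off */
		return -EBUSY;
	}
	return 0;
}

int main(void)
{
	printf("teardown: %d\n", begin_mmu_teardown());
	return 0;
}

The entry side is the mirror image: vcpus increment vcpus_running, execute a full barrier, and only then test mmu_ready, so at least one side always observes the other.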
+ */ + kvm_for_each_vcpu(i, vcpu, kvm) { + spin_lock(&vcpu->arch.vpa_update_lock); + unpin_vpa_reset(kvm, &vcpu->arch.dtl); + unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); + unpin_vpa_reset(kvm, &vcpu->arch.vpa); + spin_unlock(&vcpu->arch.vpa_update_lock); + } + + kvmppc_setup_partition_table(kvm); + kvm->arch.secure_guest = 0; + kvm->arch.mmu_ready = mmu_was_ready; +out: + mutex_unlock(&kvm->arch.mmu_setup_lock); + return ret; +} + +static int kvmhv_enable_dawr1(struct kvm *kvm) +{ + if (!cpu_has_feature(CPU_FTR_DAWR1)) + return -ENODEV; + + /* kvm == NULL means the caller is testing if the capability exists */ + if (kvm) + kvm->arch.dawr1_enabled = true; + return 0; +} + +static bool kvmppc_hash_v3_possible(void) +{ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return false; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return false; + + /* + * POWER9 chips before version 2.02 can't have some threads in + * HPT mode and some in radix mode on the same core. + */ + if (radix_enabled()) { + unsigned int pvr = mfspr(SPRN_PVR); + if ((pvr >> 16) == PVR_POWER9 && + (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) || + ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101))) + return false; + } + + return true; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -4203,6 +6518,7 @@ static struct kvmppc_ops kvm_ops_hv = { .set_one_reg = kvmppc_set_one_reg_hv, .vcpu_load = kvmppc_core_vcpu_load_hv, .vcpu_put = kvmppc_core_vcpu_put_hv, + .inject_interrupt = kvmppc_inject_interrupt_hv, .set_msr = kvmppc_set_msr_hv, .vcpu_run = kvmppc_vcpu_run_hv, .vcpu_create = kvmppc_core_vcpu_create_hv, @@ -4212,14 +6528,10 @@ static struct kvmppc_ops kvm_ops_hv = { .flush_memslot = kvmppc_core_flush_memslot_hv, .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, .commit_memory_region = kvmppc_core_commit_memory_region_hv, - .unmap_hva = kvm_unmap_hva_hv, - .unmap_hva_range = kvm_unmap_hva_range_hv, - .age_hva = kvm_age_hva_hv, - .test_age_hva = kvm_test_age_hva_hv, - .set_spte_hva = kvm_set_spte_hva_hv, - .mmu_destroy = kvmppc_mmu_destroy_hv, + .unmap_gfn_range = kvm_unmap_gfn_range_hv, + .age_gfn = kvm_age_gfn_hv, + .test_age_gfn = kvm_test_age_gfn_hv, .free_memslot = kvmppc_core_free_memslot_hv, - .create_memslot = kvmppc_core_create_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, @@ -4229,13 +6541,18 @@ static struct kvmppc_ops kvm_ops_hv = { .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, .hcall_implemented = kvmppc_hcall_impl_hv, -#ifdef CONFIG_KVM_XICS - .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv, - .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv, -#endif .configure_mmu = kvmhv_configure_mmu, .get_rmmu_info = kvmhv_get_rmmu_info, .set_smt_mode = kvmhv_set_smt_mode, + .enable_nested = kvmhv_enable_nested, + .load_from_eaddr = kvmhv_load_from_eaddr, + .store_to_eaddr = kvmhv_store_to_eaddr, + .enable_svm = kvmhv_enable_svm, + .svm_off = kvmhv_svm_off, + .enable_dawr1 = kvmhv_enable_dawr1, + .hash_v3_possible = kvmppc_hash_v3_possible, + .create_vcpu_debugfs = kvmppc_arch_create_vcpu_debugfs_hv, + .create_vm_debugfs = kvmppc_arch_create_vm_debugfs_hv, }; static int kvm_init_subcore_bitmap(void) @@ -4249,22 +6566,21 @@ static int kvm_init_subcore_bitmap(void) int node = cpu_to_node(first_cpu); /* Ignore if it is already allocated. 
*/
- if (paca[first_cpu].sibling_subcore_state)
+ if (paca_ptrs[first_cpu]->sibling_subcore_state)
 continue;

 sibling_subcore_state =
- kmalloc_node(sizeof(struct sibling_subcore_state),
+ kzalloc_node(sizeof(struct sibling_subcore_state),
 GFP_KERNEL, node);
 if (!sibling_subcore_state)
 return -ENOMEM;

- memset(sibling_subcore_state, 0,
- sizeof(struct sibling_subcore_state));
 for (j = 0; j < threads_per_core; j++) {
 int cpu = first_cpu + j;

- paca[cpu].sibling_subcore_state = sibling_subcore_state;
+ paca_ptrs[cpu]->sibling_subcore_state =
+ sibling_subcore_state;
 }
 }
 return 0;
 }
@@ -4278,6 +6594,12 @@ static int kvmppc_radix_possible(void)
 static int kvmppc_book3s_init_hv(void)
 {
 int r;
+
+ if (!tlbie_capable) {
+ pr_err("KVM-HV: Host does not support TLBIE\n");
+ return -ENODEV;
+ }
+
 /*
 * FIXME!! Do we need to check on all cpus ?
 */
@@ -4285,54 +6607,98 @@ static int kvmppc_book3s_init_hv(void)
 if (r < 0)
 return -ENODEV;

- r = kvm_init_subcore_bitmap();
+ r = kvmhv_nested_init();
 if (r)
 return r;

+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ r = kvm_init_subcore_bitmap();
+ if (r)
+ goto err;
+ }
+
 /*
 * We need a way of accessing the XICS interrupt controller,
- * either directly, via paca[cpu].kvm_hstate.xics_phys, or
+ * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or
 * indirectly, via OPAL.
 */
#ifdef CONFIG_SMP
- if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
+ if (!xics_on_xive() && !kvmhv_on_pseries() &&
+ !local_paca->kvm_hstate.xics_phys) {
 struct device_node *np;

 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
 if (!np) {
 pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
- return -ENODEV;
+ r = -ENODEV;
+ goto err;
 }
+ /* presence of intc confirmed - node can be dropped again */
+ of_node_put(np);
 }
#endif

- kvm_ops_hv.owner = THIS_MODULE;
- kvmppc_hv_ops = &kvm_ops_hv;
-
 init_default_hcalls();
 init_vcore_lists();

 r = kvmppc_mmu_hv_init();
 if (r)
- return r;
+ goto err;

- if (kvmppc_radix_possible())
+ if (kvmppc_radix_possible()) {
 r = kvmppc_radix_init();
+ if (r)
+ goto err;
+ }
+
+ r = kvmppc_uvmem_init();
+ if (r < 0) {
+ pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
+ return r;
+ }
+
+#if defined(CONFIG_KVM_XICS)
+ /*
+ * IRQ bypass is supported only for interrupts whose EOI operations are
+ * handled via OPAL calls. Therefore, register IRQ bypass handlers
+ * exclusively for PowerNV KVM when booted with 'xive=off', indicating
+ * the use of the emulated XICS interrupt controller.
+ */
+ if (!kvmhv_on_pseries()) {
+ pr_info("KVM-HV: Enabling IRQ bypass\n");
+ kvm_ops_hv.irq_bypass_add_producer =
+ kvmppc_irq_bypass_add_producer_hv;
+ kvm_ops_hv.irq_bypass_del_producer =
+ kvmppc_irq_bypass_del_producer_hv;
+ }
+#endif
+
+ kvm_ops_hv.owner = THIS_MODULE;
+ kvmppc_hv_ops = &kvm_ops_hv;
+
+ return 0;
+
+err:
+ kvmhv_nested_exit();
+ kvmppc_radix_exit();
 return r;
 }

 static void kvmppc_book3s_exit_hv(void)
 {
+ kvmppc_uvmem_free();
 kvmppc_free_host_rm_ops();
 if (kvmppc_radix_possible())
 kvmppc_radix_exit();
 kvmppc_hv_ops = NULL;
+ kvmhv_nested_exit();
 }

 module_init(kvmppc_book3s_init_hv);
 module_exit(kvmppc_book3s_exit_hv);
+MODULE_DESCRIPTION("KVM on Book3S (POWER8 and later) in hypervisor mode");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
-
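kvmppc_book3s_init_hv() above follows the kernel's usual single-error-label pattern: each setup step either succeeds or jumps to err:, which tears down what was set up before returning the error. A compact standalone sketch of the pattern with stub init/exit functions (not the module's real ones); as in the module's err: label, the teardown helpers must tolerate being called for steps that never ran:

#include <stdio.h>

static int  nested_init(void) { return 0; }
static void nested_exit(void) { puts("nested_exit"); }
static int  mmu_init(void)    { return 0; }
static int  radix_init(void)  { return 0; }
static void radix_exit(void)  { puts("radix_exit"); }

static int module_init_hv(void)
{
	int r;

	r = nested_init();
	if (r)
		return r;      /* nothing to unwind yet */

	r = mmu_init();
	if (r)
		goto err;

	r = radix_init();
	if (r)
		goto err;

	return 0;

err:
	/* mirrors the err: label above: nested teardown, then radix */
	nested_exit();
	radix_exit();
	return r;
}

int main(void) { return module_init_hv(); }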
