author | Michael Ellerman <mpe@ellerman.id.au> | 2022-05-19 23:10:42 +1000
---|---|---
committer | Michael Ellerman <mpe@ellerman.id.au> | 2022-05-19 23:10:42 +1000
commit | b104e41cda1ef9c5e851a7de3f30b53535e7d528 (patch) |
tree | 7bab688d125e67e42d387b97c45522c76155bbe2 /arch/powerpc/kvm/book3s_hv.c |
parent | a5fc286f69fc9590c22995fe05dca461fd6295b1 (diff) |
parent | ad55bae7dc364417434b69dd6c30104f20d0f84d (diff) |
Merge branch 'topic/ppc-kvm' into next
Merge our KVM topic branch.
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv.c | 73
1 file changed, 59 insertions, 14 deletions
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 5ea107d830a6..e08fb3124dca 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1327,6 +1327,12 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
 	case H_CONFER:
 	case H_REGISTER_VPA:
 	case H_SET_MODE:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+	case H_GET_TCE:
+	case H_PUT_TCE:
+	case H_PUT_TCE_INDIRECT:
+	case H_STUFF_TCE:
+#endif
 	case H_LOGICAL_CI_LOAD:
 	case H_LOGICAL_CI_STORE:
 #ifdef CONFIG_KVM_XICS
@@ -2835,7 +2841,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	 * to trap and then we emulate them.
 	 */
 	vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
-		HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
+		HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -3968,6 +3974,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	kvmhv_save_hv_regs(vcpu, &hvregs);
 	hvregs.lpcr = lpcr;
+	hvregs.amor = ~0;
 	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
 	hvregs.version = HV_GUEST_STATE_VERSION;
 	if (vcpu->arch.nested) {
@@ -4030,6 +4037,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 				unsigned long lpcr, u64 *tb)
 {
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	u64 next_timer;
 	int trap;
 
@@ -4049,34 +4058,61 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
 
 		/* H_CEDE has to be handled now, not later */
-		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+		if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
 		    kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
 			kvmppc_cede(vcpu);
 			kvmppc_set_gpr(vcpu, 3, 0);
 			trap = 0;
 		}
 
-	} else {
-		struct kvm *kvm = vcpu->kvm;
+	} else if (nested) {
+		__this_cpu_write(cpu_in_guest, kvm);
+		trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
+		__this_cpu_write(cpu_in_guest, NULL);
 
+	} else {
 		kvmppc_xive_push_vcpu(vcpu);
 
 		__this_cpu_write(cpu_in_guest, kvm);
 		trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
 		__this_cpu_write(cpu_in_guest, NULL);
 
-		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+		if (trap == BOOK3S_INTERRUPT_SYSCALL &&
 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
 			unsigned long req = kvmppc_get_gpr(vcpu, 3);
 
-			/* H_CEDE has to be handled now, not later */
+			/*
+			 * XIVE rearm and XICS hcalls must be handled
+			 * before xive context is pulled (is this
+			 * true?)
+			 */
 			if (req == H_CEDE) {
+				/* H_CEDE has to be handled now */
 				kvmppc_cede(vcpu);
-				kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+				if (!kvmppc_xive_rearm_escalation(vcpu)) {
+					/*
+					 * Pending escalation so abort
+					 * the cede.
+					 */
+					vcpu->arch.ceded = 0;
+				}
 				kvmppc_set_gpr(vcpu, 3, 0);
 				trap = 0;
 
-			/* XICS hcalls must be handled before xive is pulled */
+			} else if (req == H_ENTER_NESTED) {
+				/*
+				 * L2 should not run with the L1
+				 * context so rearm and pull it.
+				 */
+				if (!kvmppc_xive_rearm_escalation(vcpu)) {
+					/*
+					 * Pending escalation so abort
+					 * H_ENTER_NESTED.
+					 */
+					kvmppc_set_gpr(vcpu, 3, 0);
+					trap = 0;
+				}
+
 			} else if (hcall_is_xics(req)) {
 				int ret;
@@ -4234,13 +4270,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	start_wait = ktime_get();
 
 	vc->vcore_state = VCORE_SLEEPING;
-	trace_kvmppc_vcore_blocked(vc, 0);
+	trace_kvmppc_vcore_blocked(vc->runner, 0);
 	spin_unlock(&vc->lock);
 	schedule();
 	finish_rcuwait(&vc->wait);
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
-	trace_kvmppc_vcore_blocked(vc, 1);
+	trace_kvmppc_vcore_blocked(vc->runner, 1);
 	++vc->runner->stat.halt_successful_wait;
 
 	cur = ktime_get();
@@ -4520,9 +4556,14 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (!nested) {
 		kvmppc_core_prepare_to_enter(vcpu);
 
-		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
-			     &vcpu->arch.pending_exceptions))
+		if (vcpu->arch.shregs.msr & MSR_EE) {
+			if (xive_interrupt_pending(vcpu))
+				kvmppc_inject_interrupt_hv(vcpu,
+						BOOK3S_INTERRUPT_EXTERNAL, 0);
+		} else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+			     &vcpu->arch.pending_exceptions)) {
 			lpcr |= LPCR_MER;
+		}
 	} else if (vcpu->arch.pending_exceptions ||
 		   vcpu->arch.doorbell_request ||
 		   xive_interrupt_pending(vcpu)) {
@@ -4620,9 +4661,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 			if (kvmppc_vcpu_check_block(vcpu))
 				break;
 
-			trace_kvmppc_vcore_blocked(vc, 0);
+			trace_kvmppc_vcore_blocked(vcpu, 0);
 			schedule();
-			trace_kvmppc_vcore_blocked(vc, 1);
+			trace_kvmppc_vcore_blocked(vcpu, 1);
 		}
 		finish_rcuwait(wait);
 	}
@@ -5284,6 +5325,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
 		lpcr &= LPCR_PECE | LPCR_LPES;
 	} else {
+		/*
+		 * The L2 LPES mode will be set by the L0 according to whether
+		 * or not it needs to take external interrupts in HV mode.
+		 */
 		lpcr = 0;
 	}
 	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
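A behavioural note on the `kvmhv_run_single_vcpu()` hunk above: when the vCPU already has MSR[EE] set, a pending XIVE interrupt is now injected directly as `BOOK3S_INTERRUPT_EXTERNAL`, and LPCR[MER] (mediated external request) is armed only when EE is clear, so the guest takes the interrupt as soon as it re-enables external interrupts. Below is a minimal stand-alone model of just that decision, not kernel code; `struct vcpu_model` and `prepare_entry()` are hypothetical stand-ins for the real KVM structures:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel state touched by the hunk. */
struct vcpu_model {
	bool msr_ee;           /* MSR[EE]: external interrupts enabled */
	bool xive_pending;     /* xive_interrupt_pending(vcpu)         */
	bool irqprio_external; /* BOOK3S_IRQPRIO_EXTERNAL bit pending  */
};

enum entry_action { DO_NOTHING, INJECT_EXTERNAL, SET_LPCR_MER };

/* Model of the new !nested branch: inject now if EE is on, otherwise
 * arm LPCR[MER] so the guest sees the interrupt once it sets EE. */
static enum entry_action prepare_entry(const struct vcpu_model *v)
{
	if (v->msr_ee) {
		if (v->xive_pending)
			return INJECT_EXTERNAL; /* kvmppc_inject_interrupt_hv() */
	} else if (v->irqprio_external) {
		return SET_LPCR_MER;            /* lpcr |= LPCR_MER */
	}
	return DO_NOTHING;
}

int main(void)
{
	struct vcpu_model v = { .msr_ee = false, .irqprio_external = true };
	printf("%d\n", prepare_entry(&v)); /* prints 2: SET_LPCR_MER */
	return 0;
}
```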
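The hcall dispatch in the `kvmhv_p9_guest_entry()` hunk also calls `hcall_is_xics()`, which is not shown in this diff. A sketch of what it plausibly checks, assuming it simply matches the XICS interrupt-controller hcall numbers (the opcode values below are illustrative assumptions; the real definitions live in `arch/powerpc/include/asm/hvcall.h`):

```c
#include <stdbool.h>
#include <stdio.h>

/* Assumed opcode values, for illustration only. */
#define H_EOI    0x64
#define H_CPPR   0x68
#define H_IPI    0x6c
#define H_IPOLL  0x70
#define H_XIRR   0x74
#define H_XIRR_X 0x2fc

/* Sketch of hcall_is_xics(): true when the hcall targets the emulated
 * XICS interrupt controller, i.e. one that must be handled before the
 * XIVE context is pulled. */
static bool hcall_is_xics(unsigned long req)
{
	return req == H_EOI || req == H_CPPR || req == H_IPI ||
	       req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
}

int main(void)
{
	printf("%d %d\n", hcall_is_xics(H_XIRR), hcall_is_xics(0x100));
	return 0;
}
```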