Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_ras.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c  120
1 file changed, 76 insertions(+), 44 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 0787f12c1a1b..9012acadbca8 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
*
* Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*/
@@ -11,6 +9,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
+#include <asm/lppaca.h>
#include <asm/opal.h>
#include <asm/mce.h>
#include <asm/machdep.h>
@@ -66,13 +65,10 @@ static void reload_slb(struct kvm_vcpu *vcpu)
/*
* On POWER7, see if we can handle a machine check that occurred inside
* the guest in real mode, without switching to the host partition.
- *
- * Returns: 0 => exit guest, 1 => deliver machine check to guest
*/
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
- struct machine_check_event mce_evt;
long handled = 1;

if (srr1 & SRR1_MC_LDSTERR) {
@@ -110,55 +106,91 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
handled = 0;
}
+ return handled;
+}
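Note the contract that replaces the deleted "Returns:" doc line: the return value now feeds the MCE disposition logic below instead of directly choosing between guest exit and delivery. A condensed restatement as a comment (illustration only, not part of the patch):

	/*
	 * handled == 1: error recovered in real mode; the event fetched in
	 *               kvmppc_realmode_machine_check() below is marked
	 *               MCE_DISPOSITION_RECOVERED.
	 * handled == 0: left unrecovered; the guest (or its FWNMI handler)
	 *               must deal with the machine check.
	 */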
+
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+{
+ struct machine_check_event mce_evt;
+ long handled;
+
+ if (vcpu->kvm->arch.fwnmi_enabled) {
+ /* FWNMI guests handle their own recovery */
+ handled = 0;
+ } else {
+ handled = kvmppc_realmode_mc_power7(vcpu);
+ }
+
/*
- * See if we have already handled the condition in the linux host.
- * We assume that if the condition is recovered then linux host
- * will have generated an error log event that we will pick
- * up and log later.
- * Don't release mce event now. We will queue up the event so that
- * we can log the MCE event info on host console.
+ * Now get the event and stash it in the vcpu struct so it can
+ * be handled by the primary thread in virtual mode. We can't
+ * call machine_check_queue_event() here if we are running on
+ * an offline secondary thread.
*/
- if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
- goto out;
+ if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
+ if (handled && mce_evt.version == MCE_V1)
+ mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
+ } else {
+ memset(&mce_evt, 0, sizeof(mce_evt));
+ }
- if (mce_evt.version == MCE_V1 &&
- (mce_evt.severity == MCE_SEV_NO_ERROR ||
- mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
- handled = 1;
+ vcpu->arch.mce_evt = mce_evt;
+}
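The event stashed here is picked up after the guest exit, from the primary thread in virtual mode where MCE logging is safe. A minimal sketch of such a consumer, assuming the machine_check_print_event_info() helper from arch/powerpc/kernel/mce.c (the wrapper function itself is hypothetical, not part of this patch):

	static void kvmppc_log_stashed_mce(struct kvm_vcpu *vcpu)
	{
		/* Only log if kvmppc_realmode_machine_check() stashed an event. */
		if (vcpu->arch.mce_evt.version == MCE_V1)
			machine_check_print_event_info(&vcpu->arch.mce_evt,
						       false, true); /* kernel mode, in guest */
	}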
+
+
+long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ long ret = 0;
-out:
/*
- * For guest that supports FWNMI capability, hook the MCE event into
- * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI
- * exit reason. On our way to exit we will pull this event from vcpu
- * structure and print it from thread 0 of the core/subcore.
+ * Unapply and clear the offset first. That way, if the TB was not
+ * resynced then it will remain in host-offset, and if it was resynced
+ * then it is brought into host-offset. Then the tb offset is
+ * re-applied before continuing with the KVM exit.
*
- * For guest that does not support FWNMI capability (old QEMU):
- * We are now going enter guest either through machine check
- * interrupt (for unhandled errors) or will continue from
- * current HSRR0 (for handled errors) in guest. Hence
- * queue up the event so that we can log it from host console later.
+ * This way, we don't need to actually know whether or not OPAL resynced
+ * the timebase or do any of the complicated dance that the P7/8
+ * path requires.
*/
- if (vcpu->kvm->arch.fwnmi_enabled) {
- /*
- * Hook up the mce event on to vcpu structure.
- * First clear the old event.
- */
- memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt));
- if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
- vcpu->arch.mce_evt = mce_evt;
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
}
- } else
- machine_check_queue_event();
+ vc->tb_offset_applied = 0;
+ }
- return handled;
-}
+ local_paca->hmi_irqs++;
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
-{
- return kvmppc_realmode_mc_power7(vcpu);
+ if (hmi_handle_debugtrig(NULL) >= 0) {
+ ret = 1;
+ goto out;
+ }
+
+ if (ppc_md.hmi_exception_early)
+ ppc_md.hmi_exception_early(NULL);
+
+out:
+ if (kvmppc_get_tb_offset(vcpu)) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ vc->tb_offset_applied = kvmppc_get_tb_offset(vcpu);
+ }
+
+ return ret;
}
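The two mtspr(SPRN_TBU40) sequences above share one subtlety: TBU40 writes only the upper 40 bits of the timebase while the low 24 bits keep ticking, so if the low bits wrap past new_tb's low bits before the write lands, the upper bits end up one unit short. A minimal sketch of the pattern as a standalone helper (the helper name is illustrative, not part of the patch):

	static inline void write_tbu40(u64 new_tb)
	{
		mtspr(SPRN_TBU40, new_tb);
		/*
		 * If the low 24 bits of the running timebase have already
		 * wrapped past new_tb's low 24 bits, the upper 40 bits are
		 * one tick (1 << 24) behind; write them again with the
		 * carry added.
		 */
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff))
			mtspr(SPRN_TBU40, new_tb + 0x1000000);
	}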
+/*
* The following subcore HMI handling applies only to pre-POWER9 CPUs.
+ */
+
/* Check if dynamic split is in force and return subcore size accordingly. */
static inline int kvmppc_cur_subcore_size(void)
{
@@ -276,7 +308,7 @@ long kvmppc_realmode_hmi_handler(void)
{
bool resync_req;

- __this_cpu_inc(irq_stat.hmi_exceptions);
+ local_paca->hmi_irqs++;

if (hmi_handle_debugtrig(NULL) >= 0)
return 1;
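The final hunk moves the HMI count from the per-cpu irq_stat into the paca, which is always addressable in real mode, so the same increment works in both the real-mode and virtual-mode paths. A sketch of how a paca-resident count could be summed for reporting (hypothetical helper, assuming the paca_ptrs[] array):

	static u64 total_hmi_irqs(void)
	{
		u64 sum = 0;
		int cpu;

		/* Walk every CPU's paca and accumulate its HMI count. */
		for_each_possible_cpu(cpu)
			sum += paca_ptrs[cpu]->hmi_irqs;
		return sum;
	}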