author		Nicholas Piggin <npiggin@gmail.com>	2019-02-26 18:51:07 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-02-26 23:28:24 +1100
commit		ccd477028a202993b9ddca5d2404fdaca3b7a55c (patch)
tree		075f65a46de61f8f45856cd0bf8fa71e85b9262c /arch/powerpc/kernel/traps.c
parent		3b4d07d2674f6b4a9281031f99d1f7efd325b16d (diff)
powerpc/64s: Fix HV NMI vs HV interrupt recoverability test
HV interrupts that use HSRR registers do not enter with MSR[RI] clear, but
their entry code is not recoverable vs NMI, due to shared use of HSPRG1 as
a scratch register to save r13.

This means that a system reset or machine check that hits in HSRR interrupt
entry can cause r13 to be silently corrupted.

Fix this by marking NMIs non-recoverable if they land in HV interrupt
ranges.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
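---

As an aside (not part of the patch): the core trick in the fix is that
clearing the top two address bits folds the 0xc000... virtual-mode aliases
onto the real-mode vectors, so one pair of comparisons covers both the real
entry point and its +0x4000 virtual twin. Below is a minimal host-buildable
C sketch of that range test; the vector ranges are copied from the patch,
while nip_in_hsrr_entry(), main(), and the sample addresses are made up for
illustration:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Same range test the patch adds, for HSRR-using entry ranges. */
static bool nip_in_hsrr_entry(uint64_t nip)
{
	/* Fold virt-mode aliases (0xc000...) onto the real-mode vectors. */
	nip &= ~0xc000000000000000ULL;

	/* Real-mode vectors and their +0x4000 virt-mode entry points. */
	return (nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600) ||
	       (nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00) ||
	       (nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0) ||
	       (nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0);
}

int main(void)
{
	/* Virt-mode alias of an HSRR-using vector at 0xe20: flagged. */
	printf("%d\n", nip_in_hsrr_entry(0xc000000000004e20ULL));	/* 1 */
	/* 0x300 (data storage) uses SRR rather than HSRR: not flagged. */
	printf("%d\n", nip_in_hsrr_entry(0x300ULL));			/* 0 */
	return 0;
}

(The patch itself additionally flags the un-relocated trampoline ranges,
which a user-space sketch cannot reproduce.)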
Diffstat (limited to 'arch/powerpc/kernel/traps.c')
-rw-r--r--	arch/powerpc/kernel/traps.c | 66
1 file changed, 66 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b25bc8af7d38..eee8f843f3d6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -369,6 +369,70 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 	force_sig_fault(signr, code, (void __user *)addr, current);
 }
 
+/*
+ * The interrupt architecture has a quirk in that the HV interrupts excluding
+ * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
+ * that an interrupt handler must do is save off a GPR into a scratch register,
+ * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
+ * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
+ * that it is non-reentrant, which leads to random data corruption.
+ *
+ * The solution is for NMI interrupts in HV mode to check if they originated
+ * from these critical HV interrupt regions. If so, then mark them not
+ * recoverable.
+ *
+ * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
+ * HSPRG1 clobber; however, this would cause guest SPRG to be clobbered. Linux
+ * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so
+ * that would work. However any other guest OS that may have the SPRG live
+ * and MSR[RI]=1 could encounter silent corruption.
+ *
+ * Builds that do not support KVM could take this second option to increase
+ * the recoverability of NMIs.
+ */
+void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_POWERNV
+	unsigned long kbase = (unsigned long)_stext;
+	unsigned long nip = regs->nip;
+
+	if (!(regs->msr & MSR_RI))
+		return;
+	if (!(regs->msr & MSR_HV))
+		return;
+	if (regs->msr & MSR_PR)
+		return;
+
+	/*
+	 * Now test if the interrupt has hit a range that may be using
+	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
+	 * problem ranges all run un-relocated. Test real and virt modes
+	 * at the same time by dropping the high bits of the nip (virt mode
+	 * entry points still have the +0x4000 offset).
+	 */
+	nip &= ~0xc000000000000000ULL;
+	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
+		goto nonrecoverable;
+	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
+		goto nonrecoverable;
+	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
+		goto nonrecoverable;
+	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
+		goto nonrecoverable;
+	/* Trampoline code runs un-relocated, so subtract kbase. */
+	if (nip >= real_trampolines_start - kbase &&
+	    nip < real_trampolines_end - kbase)
+		goto nonrecoverable;
+	if (nip >= virt_trampolines_start - kbase &&
+	    nip < virt_trampolines_end - kbase)
+		goto nonrecoverable;
+	return;
+
+nonrecoverable:
+	regs->msr &= ~MSR_RI;
+#endif
+}
+
 void system_reset_exception(struct pt_regs *regs)
 {
 	/*
@@ -379,6 +443,8 @@ void system_reset_exception(struct pt_regs *regs)
 	if (!nested)
 		nmi_enter();
 
+	hv_nmi_check_nonrecoverable(regs);
+
 	__this_cpu_inc(irq_stat.sreset_irqs);
 
 	/* See if any machine dependent calls */
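
For context on how the cleared RI bit is consumed: further down in
system_reset_exception() (outside this hunk), the handler already treats a
clear MSR[RI] as unrecoverable, roughly along these lines (a paraphrase of
the surrounding kernel code, not a quote from this diff):

	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable System Reset");

So when hv_nmi_check_nonrecoverable() clears MSR_RI in the saved regs, the
existing recoverability check turns what would have been silent r13
corruption into an explicit panic.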