author     Nicholas Piggin <npiggin@gmail.com>	2021-09-23 00:54:50 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>	2021-12-16 21:31:45 +1100
commit     0faf20a1ad1647c0fc0f5a367c71e5e84deaf899 (patch)
tree       bdfd3c02c8df287d0fc8e472704892fef5638b7a /arch/powerpc/include/asm/hw_irq.h
parent     5a7745b96f43c69f9b4875bcf516a0341acbc3fb (diff)
powerpc/64s/interrupt: Don't enable MSR[EE] in irq handlers unless perf is in use
Enabling MSR[EE] in interrupt handlers while interrupts are still soft masked allows PMIs to profile interrupt handlers to some degree, beyond what SIAR latching allows. When perf is not being used, this is almost useless work. It requires an extra mtmsrd in the irq handler, and it also opens the door to masked interrupts hitting and requiring replay, which is more expensive than just taking them directly. This effect can be noticeable in high IRQ workloads.

Avoid enabling MSR[EE] unless perf is currently in use. This saves about 60 cycles (or 8%) on a simple decrementer interrupt microbenchmark. Replayed interrupts drop from 1.4% of all interrupts taken, to 0.003%.

This does prevent the soft-nmi interrupt being taken in these handlers, but that's not too reliable anyway. The SMP watchdog will continue to be the reliable way to catch lockups.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210922145452.352571-5-npiggin@gmail.com
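As context for the header change below, here is a minimal sketch of how an asynchronous interrupt handler is expected to use the new pair, with the old single may_hard_irq_enable() call split into a test and an action. The handler body and the name example_async_irq_handler are illustrative only; the real call sites (e.g. timer_interrupt()) live outside this header and are not part of this diff:

#include <asm/hw_irq.h>

/*
 * Illustrative caller only: clear the interrupt source first, then
 * hard-enable MSR[EE] only when should_hard_irq_enable() says a
 * prompt PMI is worth taking (perf active, and nothing pending that
 * must stay hard masked).
 */
static void example_async_irq_handler(void)
{
	/* ... clear/acknowledge the source of the interrupt here ... */

	if (should_hard_irq_enable())
		do_hard_irq_enable();

	/* ... remainder of the handler, now profilable by PMIs ... */
}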
Diffstat (limited to 'arch/powerpc/include/asm/hw_irq.h')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 57
1 file changed, 47 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 8d6f80101eda..a58fb4aa6c81 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -345,17 +345,54 @@ static inline bool lazy_irq_pending_nocheck(void)
bool power_pmu_wants_prompt_pmi(void);
/*
- * This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts after having cleared the source
- * of the interrupt. They are kept disabled if there is a different
- * soft-masked interrupt pending that requires hard masking.
+ * This is called by asynchronous interrupts to check whether to
+ * conditionally re-enable hard interrupts after having cleared
+ * the source of the interrupt. They are kept disabled if there
+ * is a different soft-masked interrupt pending that requires hard
+ * masking.
*/
-static inline void may_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(void)
{
- if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
- get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
- __hard_irq_enable();
- }
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(mfmsr() & MSR_EE);
+#endif
+#ifdef CONFIG_PERF_EVENTS
+ /*
+ * If the PMU is not running, there is not much reason to enable
+ * MSR[EE] in irq handlers because any interrupts would just be
+ * soft-masked.
+ *
+ * TODO: Add test for 64e
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
+ return false;
+
+ if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
+ return false;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+/*
+ * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ */
+static inline void do_hard_irq_enable(void)
+{
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+ WARN_ON(mfmsr() & MSR_EE);
+#endif
+ /*
+ * This allows PMI interrupts (and watchdog soft-NMIs) through.
+ * There is no other reason to enable this way.
+ */
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ __hard_irq_enable();
}
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
@@ -436,7 +473,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static inline bool may_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(void)
{
return false;
}