| author | Christophe Leroy <christophe.leroy@csgroup.eu> | 2021-03-12 12:50:17 +0000 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2021-03-29 13:22:04 +1100 |
| commit | be39e10506830a2e654fb799a48025999f89a6ff (patch) | |
| tree | 415685c0cd9b04f4f5740f640fc0ca22288f82d6 /arch/powerpc/kernel/entry_32.S | |
| parent | 0512aadd750acf72b8906973c34e7092642d4323 (diff) | |
powerpc/32: Reconcile interrupts in C
There is no need for this to be in asm anymore;
use the new interrupt entry wrapper.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/602e1ec47e15ca540f7edb9cf6feb6c249911bd6.1615552866.git.christophe.leroy@csgroup.eu
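For context, the asm deleted below existed only to decide whether lockdep's `trace_hardirqs_off()` had to be called on interrupt entry, and to re-enable the MMU first so that call was safe. Once handlers are entered through the C interrupt entry wrapper, the same reconciliation reduces to a few lines of C that already run with the MMU on. A minimal sketch of that wrapper-side logic, shaped like the kernel's `interrupt_enter_prepare()` in `arch/powerpc/include/asm/interrupt.h` (the body here is illustrative, not the committed change itself):

```c
#include <linux/irqflags.h>	/* trace_hardirqs_off() */
#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/hw_irq.h>		/* arch_irq_disabled_regs() */

/*
 * Illustrative sketch: reconcile the IRQ-tracing state on interrupt
 * entry. The low-level asm has already cleared MSR[EE]; if the
 * interrupted context had interrupts enabled, lockdep must now be
 * told they are (hard-)disabled.
 */
static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();
}
```

Because the wrapper runs translated, the `reenable_mmu` trampoline and its register save/restore dance below become unnecessary, which is why the patch is pure deletion on the asm side.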
Diffstat (limited to 'arch/powerpc/kernel/entry_32.S')
| -rw-r--r-- | arch/powerpc/kernel/entry_32.S | 58 |
1 file changed, 0 insertions, 58 deletions
```diff
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 535c55f4393a..0f18fe14649c 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -203,22 +203,6 @@ transfer_to_handler_cont:
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 	mtspr	SPRN_NRI, r0
 #endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/*
-	 * When tracing IRQ state (lockdep) we enable the MMU before we call
-	 * the IRQ tracing functions as they might access vmalloc space or
-	 * perform IOs for console output.
-	 *
-	 * To speed up the syscall path where interrupts stay on, let's check
-	 * first if we are changing the MSR value at all.
-	 */
-	tophys_novmstack r12, r1
-	lwz	r12,_MSR(r12)
-	andi.	r12,r12,MSR_EE
-	bne	1f
-
-	/* MSR isn't changing, just transition directly */
-#endif
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
@@ -244,48 +228,6 @@ transfer_to_handler_cont:
 _ASM_NOKPROBE_SYMBOL(transfer_to_handler)
 _ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
-	 * keep interrupts disabled at this point otherwise we might risk
-	 * taking an interrupt before we tell lockdep they are enabled.
-	 */
-	lis	r12,reenable_mmu@h
-	ori	r12,r12,reenable_mmu@l
-	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
-	mtspr	SPRN_SRR0,r12
-	mtspr	SPRN_SRR1,r0
-	rfi
-#ifdef CONFIG_40x
-	b .	/* Prevent prefetch past rfi */
-#endif
-
-reenable_mmu:
-	/*
-	 * We save a bunch of GPRs,
-	 * r3 can be different from GPR3(r1) at this point, r9 and r11
-	 * contains the old MSR and handler address respectively,
-	 * r0, r4-r8, r12, CCR, CTR, XER etc... are left
-	 * clobbered as they aren't useful past this point.
-	 */
-
-	stwu	r1,-32(r1)
-	stw	r9,8(r1)
-	stw	r11,12(r1)
-	stw	r3,16(r1)
-
-	/* If we are disabling interrupts (normal case), simply log it with
-	 * lockdep
-	 */
-1:	bl	trace_hardirqs_off
-	lwz	r3,16(r1)
-	lwz	r11,12(r1)
-	lwz	r9,8(r1)
-	addi	r1,r1,32
-	mtctr	r11
-	mtlr	r9
-	bctr	/* jump to handler */
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
 #ifndef CONFIG_VMAP_STACK
 /*
  * On kernel stack overflow, load up an initial stack pointer
```
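The `andi. r12,r12,MSR_EE` test removed above is what the C side expresses through `arch_irq_disabled_regs()`: on 32-bit, the interrupted context's interrupt state is simply the EE bit of the MSR saved in the register frame. A sketch of that helper's shape (see `arch/powerpc/include/asm/hw_irq.h` for the real definition):

```c
#include <asm/ptrace.h>	/* struct pt_regs */
#include <asm/reg.h>	/* MSR_EE */

/*
 * 32-bit sketch: the interrupted context had interrupts disabled
 * iff MSR[EE] was clear in the MSR image saved on entry.
 */
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}
```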