Diffstat (limited to 'arch/mips/kernel/idle.c')
| -rw-r--r-- | arch/mips/kernel/idle.c | 123 |
1 file changed, 65 insertions, 58 deletions
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 0c655deeea4a..80e8a04a642e 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * MIPS idle loop and WAIT instruction support.
  *
@@ -5,12 +6,8 @@
  * Copyright (C) 1994 - 2006 Ralf Baechle
  * Copyright (C) 2003, 2004 Maciej W. Rozycki
  * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
+#include <linux/cpu.h>
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
@@ -18,6 +15,7 @@
 #include <linux/sched.h>
 #include <asm/cpu.h>
 #include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
 #include <asm/idle.h>
 #include <asm/mipsregs.h>
 
@@ -31,24 +29,10 @@
 void (*cpu_wait)(void);
 EXPORT_SYMBOL(cpu_wait);
 
-static void r3081_wait(void)
+static void __cpuidle r3081_wait(void)
 {
 	unsigned long cfg = read_c0_conf();
 	write_c0_conf(cfg | R30XX_CONF_HALT);
-	local_irq_enable();
-}
-
-static void r39xx_wait(void)
-{
-	if (!need_resched())
-		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
-	local_irq_enable();
-}
-
-void r4k_wait(void)
-{
-	local_irq_enable();
-	__r4k_wait();
 }
 
 /*
@@ -58,30 +42,26 @@ void r4k_wait(void)
  * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
  * using this version a gamble.
  */
-void r4k_wait_irqoff(void)
+void __cpuidle r4k_wait_irqoff(void)
 {
 	if (!need_resched())
 		__asm__(
 		"	.set	push		\n"
-		"	.set	mips3		\n"
+		"	.set	arch=r4000	\n"
 		"	wait			\n"
 		"	.set	pop		\n");
-	local_irq_enable();
-	__asm__(
-	"	.globl __pastwait	\n"
-	"__pastwait:			\n");
 }
 
 /*
  * The RM7000 variant has to handle erratum 38.  The workaround is to not
  * have any pending stores when the WAIT instruction is executed.
  */
-static void rm7k_wait_irqoff(void)
+static void __cpuidle rm7k_wait_irqoff(void)
 {
 	if (!need_resched())
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"	.set	noat					\n"
 		"	mfc0	$1, $12					\n"
 		"	sync						\n"
@@ -89,7 +69,6 @@ static void rm7k_wait_irqoff(void)
 		"	wait						\n"
 		"	mtc0	$1, $12		# stalls until W stage	\n"
 		"	.set	pop					\n");
-	local_irq_enable();
 }
 
 /*
@@ -97,12 +76,13 @@
  * since coreclock (and the cp0 counter) stops upon executing it.  Only an
  * interrupt can wake it, so they must be enabled before entering idle modes.
  */
-static void au1k_wait(void)
+static void __cpuidle au1k_wait(void)
 {
 	unsigned long c0status = read_c0_status() | 1;	/* irqs on */
 
 	__asm__(
-	"	.set	mips3			\n"
+	"	.set	push			\n"
+	"	.set	arch=r4000		\n"
 	"	cache	0x14, 0(%0)		\n"
 	"	cache	0x14, 32(%0)		\n"
 	"	sync				\n"
@@ -112,8 +92,10 @@
 	"	nop				\n"
 	"	nop				\n"
 	"	nop				\n"
-	"	.set	mips0			\n"
+	"	.set	pop			\n"
 	: : "r" (au1k_wait), "r" (c0status));
+
+	raw_local_irq_disable();
 }
 
 static int __initdata nowait;
@@ -136,14 +118,21 @@ void __init check_wait(void)
 		return;
 	}
 
-	switch (c->cputype) {
+	/*
+	 * MIPSr6 specifies that masked interrupts should unblock an executing
+	 * wait instruction, and thus that it is safe for us to use
+	 * r4k_wait_irqoff. Yippee!
+	 */
+	if (cpu_has_mips_r6) {
+		cpu_wait = r4k_wait_irqoff;
+		return;
+	}
+
+	switch (current_cpu_type()) {
 	case CPU_R3081:
 	case CPU_R3081E:
 		cpu_wait = r3081_wait;
 		break;
-	case CPU_TX3927:
-		cpu_wait = r39xx_wait;
-		break;
 	case CPU_R4200:
 /*	case CPU_R4300: */
 	case CPU_R4600:
@@ -157,31 +146,54 @@
 	case CPU_4KEC:
 	case CPU_4KSC:
 	case CPU_5KC:
+	case CPU_5KE:
 	case CPU_25KF:
 	case CPU_PR4450:
 	case CPU_BMIPS3300:
 	case CPU_BMIPS4350:
 	case CPU_BMIPS4380:
-	case CPU_BMIPS5000:
 	case CPU_CAVIUM_OCTEON:
 	case CPU_CAVIUM_OCTEON_PLUS:
 	case CPU_CAVIUM_OCTEON2:
-	case CPU_JZRISC:
-	case CPU_LOONGSON1:
-	case CPU_XLR:
-	case CPU_XLP:
+	case CPU_CAVIUM_OCTEON3:
+	case CPU_XBURST:
+	case CPU_LOONGSON32:
 		cpu_wait = r4k_wait;
 		break;
+	case CPU_LOONGSON64:
+		if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+				(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
+				(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+			cpu_wait = r4k_wait;
+		break;
+	case CPU_BMIPS5000:
+		cpu_wait = r4k_wait_irqoff;
+		break;
 	case CPU_RM7000:
 		cpu_wait = rm7k_wait_irqoff;
 		break;
+	case CPU_PROAPTIV:
+	case CPU_P5600:
+		/*
+		 * Incoming Fast Debug Channel (FDC) data during a wait
+		 * instruction causes the wait never to resume, even if an
+		 * interrupt is received. Avoid using wait at all if FDC data is
+		 * likely to be received.
+		 */
+		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
+			break;
+		fallthrough;
 	case CPU_M14KC:
 	case CPU_M14KEC:
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_1004K:
+	case CPU_1074K:
+	case CPU_INTERAPTIV:
+	case CPU_M5150:
+	case CPU_QEMU_GENERIC:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
@@ -209,7 +221,7 @@
 		break;
 
 		/*
-		 * Another rev is incremeting c0_count at a reduced clock
+		 * Another rev is incrementing c0_count at a reduced clock
 		 * rate while in WAIT mode.  So we basically have the choice
 		 * between using the cp0 timer as clocksource or avoiding
 		 * the WAIT instruction.  Until more details are known,
@@ -217,29 +229,24 @@
		cpu_wait = r4k_wait;
 		 */
 		break;
-	case CPU_RM9000:
-		if ((c->processor_id & 0x00ff) >= 0x40)
-			cpu_wait = r4k_wait;
-		break;
 	default:
 		break;
 	}
 }
 
-static void smtc_idle_hook(void)
+__cpuidle void arch_cpu_idle(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_idle_loop_hook(void);
-
-	smtc_idle_loop_hook();
-#endif
+	if (cpu_wait)
+		cpu_wait();
 }
 
-void arch_cpu_idle(void)
+#ifdef CONFIG_CPU_IDLE
+
+__cpuidle int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+				      struct cpuidle_driver *drv, int index)
 {
-	smtc_idle_hook();
-	if (cpu_wait)
-		cpu_wait();
-	else
-		local_irq_enable();
+	arch_cpu_idle();
+	return index;
}
+
+#endif
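The new mips_cpuidle_wait_enter() added at the bottom of the diff has the signature of a standard CPUidle ->enter callback, so a platform driver can expose the plain WAIT state through the generic cpuidle framework. Below is a minimal sketch of such a registration, assuming only the usual <linux/cpuidle.h> API; the driver name, init hook and state parameters (example_wait_driver, "example_mips_wait", latency/residency of 1) are illustrative and not taken from any in-tree driver, which may wire this callback up differently.

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/idle.h>

/* One-state driver: state 0 simply executes the WAIT instruction by
 * calling mips_cpuidle_wait_enter(). Names and residency values are
 * illustrative placeholders, not values from an existing driver. */
static struct cpuidle_driver example_wait_driver = {
	.name	= "example_mips_wait",
	.owner	= THIS_MODULE,
	.states = {
		[0] = {
			.enter			= mips_cpuidle_wait_enter,
			.exit_latency		= 1,
			.target_residency	= 1,
			.name			= "wait",
			.desc			= "MIPS wait",
		},
	},
	.state_count = 1,
};

static int __init example_wait_init(void)
{
	/* Registers the driver and a cpuidle device for each online CPU. */
	return cpuidle_register(&example_wait_driver, NULL);
}
device_initcall(example_wait_init);

This fits the other change visible in the diff: the wait routines no longer re-enable interrupts themselves, and au1k_wait(), which has to run with interrupts on to be woken, disables them again before returning so the caller sees the interrupt state it expects.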
