author | Tony Lindgren <tony@atomide.com> | 2020-03-04 14:54:30 -0800
---|---|---
committer | Linus Walleij <linus.walleij@linaro.org> | 2020-03-09 10:24:01 +0100
commit | 55be2f50336f67800513b46c5ba6270e4ed0e784 (patch) |
tree | d170710b396938722eb314005088f0d6b83a82dd | /arch/arm/mach-omap2/cpuidle44xx.c
parent | bb6d3fb354c5ee8d6bde2d576eb7220ea09862b9 (diff) |
ARM: OMAP2+: Handle errors for cpu_pm
We need to check for errors when calling cpu_pm_enter() and
cpu_cluster_pm_enter(), and bail out on any error, as otherwise
we can enter a deeper idle state when that is not desired.
I'm not aware of the missing error handling causing problems yet,
but we need this at least to block deeper idle states when
a GPIO instance has pending interrupts.
Cc: Dave Gerlach <d-gerlach@ti.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: Keerthy <j-keerthy@ti.com>
Cc: Ladislav Michl <ladis@linux-mips.org>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Tero Kristo <t-kristo@ti.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
Link: https://lore.kernel.org/r/20200304225433.37336-2-tony@atomide.com
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
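
The core of the change is a bail-out pattern in the idle path: only go deeper once cpu_pm_enter() (and, for a cluster state, cpu_cluster_pm_enter()) has succeeded, and unwind otherwise. Below is a minimal, hypothetical sketch of that pattern in isolation, not the OMAP code itself; enter_deep_idle() and its cluster_can_lose_context argument are made up for illustration.

```c
/*
 * Minimal sketch (not the OMAP idle path itself) of the bail-out
 * pattern this patch adds: refuse to enter a deeper idle state
 * unless the CPU PM notifier chains succeed, and unwind in reverse
 * order. enter_deep_idle() and cluster_can_lose_context are made up.
 */
#include <linux/cpu_pm.h>
#include <linux/types.h>

static int enter_deep_idle(bool cluster_can_lose_context)
{
	int error;

	/* Save VFP and per-CPU interrupt context. */
	error = cpu_pm_enter();
	if (error)
		goto out;		/* stay in a shallower state */

	if (cluster_can_lose_context) {
		/* Save GIC and wakeupgen (cluster) context as well. */
		error = cpu_cluster_pm_enter();
		if (error)
			goto cpu_pm_out;
	}

	/* ... hardware-specific low-power entry would go here ... */

	if (cluster_can_lose_context)
		cpu_cluster_pm_exit();

cpu_pm_out:
	cpu_pm_exit();
out:
	return error;
}
```

The actual patch keeps the unwind labels (cpu_cluster_pm_out, cpu_pm_out) inside the existing exit path of omap_enter_idle_coupled() rather than in a separate helper like this.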
Diffstat (limited to 'arch/arm/mach-omap2/cpuidle44xx.c')
-rw-r--r-- | arch/arm/mach-omap2/cpuidle44xx.c | 26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index fe75d4fa6073..6f5f89711f25 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
 	struct idle_statedata *cx = state_ptr + index;
 	u32 mpuss_can_lose_context = 0;
+	int error;
 
 	/*
 	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 		 * Call idle CPU PM enter notifier chain so that
 		 * VFP and per CPU interrupt context is saved.
 		 */
-		cpu_pm_enter();
+		error = cpu_pm_enter();
+		if (error)
+			goto cpu_pm_out;
 
 		if (dev->cpu == 0) {
 			pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 			 * Call idle CPU cluster PM enter notifier chain
 			 * to save GIC and wakeupgen context.
 			 */
-			if (mpuss_can_lose_context)
-				cpu_cluster_pm_enter();
+			if (mpuss_can_lose_context) {
+				error = cpu_cluster_pm_enter();
+				if (error)
+					goto cpu_cluster_pm_out;
+			}
 		}
 	}
 
 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
 	cpu_done[dev->cpu] = true;
 
+cpu_cluster_pm_out:
 	/* Wakeup CPU1 only if it is not offlined */
 	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
 
@@ -198,18 +205,19 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	}
 
 	/*
-	 * Call idle CPU PM exit notifier chain to restore
-	 * VFP and per CPU IRQ context.
-	 */
-	cpu_pm_exit();
-
-	/*
 	 * Call idle CPU cluster PM exit notifier chain
 	 * to restore GIC and wakeupgen context.
 	 */
 	if (dev->cpu == 0 && mpuss_can_lose_context)
 		cpu_cluster_pm_exit();
 
+	/*
+	 * Call idle CPU PM exit notifier chain to restore
+	 * VFP and per CPU IRQ context.
+	 */
+	cpu_pm_exit();
+
+cpu_pm_out:
 	tick_broadcast_exit();
 
 fail:
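
For context on why cpu_pm_enter() and cpu_cluster_pm_enter() can fail at all: they run the CPU PM notifier chain, and a notifier that returns NOTIFY_BAD causes the call to return a negative error code to the idle path. The GPIO case mentioned in the commit message could plug in roughly as in the sketch below; this is a hypothetical notifier, not the actual OMAP GPIO driver code, and my_gpio_has_pending_irq() is a made-up helper.

```c
/*
 * Hypothetical CPU PM notifier (not the real OMAP GPIO driver)
 * showing how a driver can veto deeper idle: returning NOTIFY_BAD
 * from the CPU_CLUSTER_PM_ENTER event makes cpu_cluster_pm_enter()
 * return an error, which the patched idle path now honours.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/types.h>

/* Made-up helper standing in for a real "pending wakeup IRQ" check. */
static bool my_gpio_has_pending_irq(void)
{
	return false;
}

static int my_gpio_cpu_pm_notifier(struct notifier_block *nb,
				   unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		/* Block deeper idle while an interrupt is still pending. */
		if (my_gpio_has_pending_irq())
			return NOTIFY_BAD;
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_gpio_cpu_pm_nb = {
	.notifier_call = my_gpio_cpu_pm_notifier,
};

static int __init my_gpio_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&my_gpio_cpu_pm_nb);
}
device_initcall(my_gpio_cpu_pm_init);
```

The sketch only illustrates the mechanism; the commit message indicates the intended first user is a GPIO instance with pending interrupts, handled elsewhere in this series.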