author    Tony Lindgren <tony@atomide.com>    2020-03-04 14:54:30 -0800
committer Linus Walleij <linus.walleij@linaro.org>    2020-03-09 10:24:01 +0100
commit    55be2f50336f67800513b46c5ba6270e4ed0e784 (patch)
tree      d170710b396938722eb314005088f0d6b83a82dd
parent    bb6d3fb354c5ee8d6bde2d576eb7220ea09862b9 (diff)
ARM: OMAP2+: Handle errors for cpu_pm
We need to check for errors when calling cpu_pm_enter() and
cpu_cluster_pm_enter(), and bail out on failure, as otherwise we can
enter a deeper idle state when that is not desired. I'm not aware of
the missing error handling causing problems yet, but we need it at
least to block deeper idle states when a GPIO instance has pending
interrupts.

Cc: Dave Gerlach <d-gerlach@ti.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: Keerthy <j-keerthy@ti.com>
Cc: Ladislav Michl <ladis@linux-mips.org>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Tero Kristo <t-kristo@ti.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
Link: https://lore.kernel.org/r/20200304225433.37336-2-tony@atomide.com
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-rw-r--r--  arch/arm/mach-omap2/cpuidle34xx.c |  9
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c | 26
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c      |  8
3 files changed, 30 insertions, 13 deletions
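For context on why cpu_pm_enter() can fail at all: CPU PM notifier callbacks
registered with cpu_pm_register_notifier() may veto the CPU_PM_ENTER
transition by returning NOTIFY_BAD, which cpu_pm_enter() turns into a
negative error code for its caller. Below is a minimal, hypothetical sketch
of such a notifier; the driver name and the my_gpio_irq_pending() helper are
illustrative only and are not part of this patch, which only adds the error
checks on the caller side.

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/types.h>

/* Hypothetical helper: stand-in for reading the GPIO IRQ status registers. */
static bool my_gpio_irq_pending(void)
{
	return false;
}

static int my_gpio_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* Veto deeper idle: cpu_pm_enter() will return an error. */
		if (my_gpio_irq_pending())
			return NOTIFY_BAD;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* Nothing was saved in this sketch, so nothing to restore. */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_gpio_cpu_pm_nb = {
	.notifier_call = my_gpio_cpu_pm_notify,
};

static int __init my_gpio_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&my_gpio_cpu_pm_nb);
}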
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 532a3e4b98c6..090a8aafb25e 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
int index)
{
struct omap3_idle_statedata *cx = &omap3_idle_data[index];
+ int error;
if (omap_irq_pending() || need_resched())
goto return_sleep_time;
@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP context is saved.
*/
- if (cx->mpu_state == PWRDM_POWER_OFF)
- cpu_pm_enter();
+ if (cx->mpu_state == PWRDM_POWER_OFF) {
+ error = cpu_pm_enter();
+ if (error)
+ goto out_clkdm_set;
+ }
/* Execute ARM wfi */
omap_sram_idle();
@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
cpu_pm_exit();
+out_clkdm_set:
/* Re-allow idle for C1 */
if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index fe75d4fa6073..6f5f89711f25 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
{
struct idle_statedata *cx = state_ptr + index;
u32 mpuss_can_lose_context = 0;
+ int error;
/*
* CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved.
*/
- cpu_pm_enter();
+ error = cpu_pm_enter();
+ if (error)
+ goto cpu_pm_out;
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU cluster PM enter notifier chain
* to save GIC and wakeupgen context.
*/
- if (mpuss_can_lose_context)
- cpu_cluster_pm_enter();
+ if (mpuss_can_lose_context) {
+ error = cpu_cluster_pm_enter();
+ if (error)
+ goto cpu_cluster_pm_out;
+ }
}
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu_done[dev->cpu] = true;
+cpu_cluster_pm_out:
/* Wakeup CPU1 only if it is not offlined */
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
@@ -198,18 +205,19 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
}
/*
- * Call idle CPU PM exit notifier chain to restore
- * VFP and per CPU IRQ context.
- */
- cpu_pm_exit();
-
- /*
* Call idle CPU cluster PM exit notifier chain
* to restore GIC and wakeupgen context.
*/
if (dev->cpu == 0 && mpuss_can_lose_context)
cpu_cluster_pm_exit();
+ /*
+ * Call idle CPU PM exit notifier chain to restore
+ * VFP and per CPU IRQ context.
+ */
+ cpu_pm_exit();
+
+cpu_pm_out:
tick_broadcast_exit();
fail:
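Note the reordering in the exit path above: cpu_cluster_pm_exit() now runs
before cpu_pm_exit(), so the exit notifiers unwind in the reverse order of
the enter calls, and the new cpu_pm_out label lets a failed cpu_pm_enter()
skip both exit calls. A deliberately simplified sketch of that goto-based
unwind idiom follows; it is not the exact control flow of
omap_enter_idle_coupled(), which also coordinates CPU0/CPU1 and the
cpu_done[] flags.

#include <linux/cpu_pm.h>

/* Simplified illustration only; error paths mirror the enter order. */
static int enter_low_power_sketch(void)
{
	int error;

	error = cpu_pm_enter();			/* save per-CPU context */
	if (error)
		goto out;			/* nothing to unwind yet */

	error = cpu_cluster_pm_enter();		/* save cluster context */
	if (error)
		goto cpu_pm_out;		/* unwind the per-CPU enter */

	/* ... enter the low-power state here ... */

	cpu_cluster_pm_exit();			/* restore cluster context */
cpu_pm_out:
	cpu_pm_exit();				/* restore per-CPU context */
out:
	return error;
}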
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index e66e9948636c..6df395fff971 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -191,6 +191,7 @@ void omap_sram_idle(void)
int per_next_state = PWRDM_POWER_ON;
int core_next_state = PWRDM_POWER_ON;
u32 sdrc_pwr = 0;
+ int error;
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
switch (mpu_next_state) {
@@ -219,8 +220,11 @@ void omap_sram_idle(void)
pwrdm_pre_transition(NULL);
/* PER */
- if (per_next_state == PWRDM_POWER_OFF)
- cpu_cluster_pm_enter();
+ if (per_next_state == PWRDM_POWER_OFF) {
+ error = cpu_cluster_pm_enter();
+ if (error)
+ return;
+ }
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {