author    Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-02 09:54:01 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-02 09:54:01 +0100
commit    29cb3cd208dd0e4471bb80bec4facc49ceb199fa (patch)
tree      035128bf7af997d5e1e5208c900ba78c5a1df46d /arch/arm/kernel/sleep.S
parent    cbe263497def23befb6f475977661bae5d1f82e4 (diff)
ARM: pm: allow suspend finisher to return error codes
There are SoCs where attempting to enter a low power state is ignored, and the CPU continues executing instructions with all state preserved. It is over-complex at that point to disable the MMU just to call the resume path.

Instead, allow the suspend finisher to return error codes to abort suspend in this circumstance, where the cpu_suspend internals will then unwind the saved state on the stack. Also omit the tlb flush as no changes to the page tables will have happened.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
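For illustration only, not part of this patch: a minimal sketch of how a platform suspend finisher might use the new return-value contract. The names mysoc_pm_request_off() and mysoc_enter_lowpower() are hypothetical, and the header locations and the exact C-callable cpu_suspend() prototype vary by kernel version; the only behaviour taken from the patch is that a finisher which falls through may return an error code, which cpu_suspend hands back to its caller after unwinding, while a successful suspend/resume cycle returns zero.

    /* Header locations are approximate for kernels of this era. */
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <asm/proc-fns.h>
    #include <asm/suspend.h>

    /*
     * Hypothetical finisher: ask the power controller to cut power,
     * then wait for interrupt.  On this SoC the request may be
     * ignored, in which case execution simply falls through.
     */
    static int mysoc_suspend_finisher(unsigned long arg)
    {
    	mysoc_pm_request_off();		/* hypothetical register write */
    	cpu_do_idle();			/* WFI; may be ignored by the SoC */

    	/* Still executing: the low power state was not entered. */
    	return -EBUSY;
    }

    static int mysoc_enter_lowpower(void)
    {
    	int ret;

    	/*
    	 * With this patch, cpu_suspend() returns 0 after a real
    	 * suspend/resume cycle, or the finisher's error code when
    	 * the attempt was aborted and the saved state unwound.
    	 */
    	ret = cpu_suspend(0, mysoc_suspend_finisher);
    	if (ret)
    		pr_debug("low power entry ignored, continuing\n");

    	return ret;
    }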
Diffstat (limited to 'arch/arm/kernel/sleep.S')
-rw-r--r--  arch/arm/kernel/sleep.S | 11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index c156d0e5f455..dc902f2c6845 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -12,7 +12,6 @@
* r1 = v:p offset
* r2 = suspend function arg0
* r3 = suspend function
- * Note: does not return until system resumes
*/
ENTRY(__cpu_suspend)
stmfd sp!, {r4 - r11, lr}
@@ -26,7 +25,7 @@ ENTRY(__cpu_suspend)
#endif
mov r6, sp @ current virtual SP
sub sp, sp, r5 @ allocate CPU state on stack
- mov r0, sp @ save pointer
+ mov r0, sp @ save pointer to CPU save block
add ip, ip, r1 @ convert resume fn to phys
stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn
ldr r5, =sleep_save_sp
@@ -55,10 +54,17 @@ ENTRY(__cpu_suspend)
#else
bl __cpuc_flush_kern_all
#endif
+ adr lr, BSYM(cpu_suspend_abort)
ldmfd sp!, {r0, pc} @ call suspend fn
ENDPROC(__cpu_suspend)
.ltorg
+cpu_suspend_abort:
+ ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn
+ mov sp, r2
+ ldmfd sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
/*
* r0 = control register value
* r1 = v:p offset (preserved by cpu_do_resume)
@@ -89,6 +95,7 @@ cpu_resume_after_mmu:
str r5, [r2, r4, lsl #2] @ restore old mapping
mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
bl cpu_init @ restore the und/abt/irq banked regs
+ mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)