author      Vasily Gorbik <gor@linux.ibm.com>    2019-11-22 13:12:57 +0100
committer   Vasily Gorbik <gor@linux.ibm.com>    2019-11-30 10:52:45 +0100
commit      7bcaad1f9fac889f5fcd1a383acf7e00d006da41 (patch)
tree        af2851175e748e5dbee515c9e7ab8369b43ffc00 /arch/s390
parent      7579425777c0d802237e0d59ae395e8cf60723e1 (diff)
s390: avoid misusing CALL_ON_STACK for task stack setup
CALL_ON_STACK is intended to be used for temporary stack switching with a potential return to the caller.

When CALL_ON_STACK is misused to switch from the nodat stack to the task stack, the back_chain information would later lead the stack unwinder from the task stack into the (per cpu) nodat stack, which is reused for other purposes. This would yield confusing unwinding results or errors.

To avoid that, introduce CALL_ON_STACK_NORETURN to be used instead. It makes sure that back_chain is zeroed and that the unwinder finishes gracefully, ending up at the task pt_regs.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
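As a reader's aid (not part of the commit), here is a minimal sketch of the intended split between the two macros; do_cleanup_work(), enter_task_context() and the two wrapper functions are hypothetical names used only for illustration:

    #include <asm/stacktrace.h>

    /* Hypothetical helpers, for illustration only. */
    static void do_cleanup_work(void) { }
    static void enter_task_context(void) { for (;;) ; }

    static void example_temporary_switch(unsigned long nodat_stack)
    {
            /*
             * Temporary switch: do_cleanup_work() runs on nodat_stack and
             * control returns here afterwards, so the intact back_chain is
             * exactly what the unwinder needs while the switch is active.
             */
            CALL_ON_STACK(do_cleanup_work, nodat_stack, 0);
    }

    static void example_final_switch(unsigned long task_stack)
    {
            /*
             * Final switch: enter_task_context() never returns, and back_chain
             * is zeroed first, so a later unwind stops cleanly at the task's
             * pt_regs instead of walking back into the reused nodat stack.
             */
            CALL_ON_STACK_NORETURN(enter_task_context, task_stack);
    }

In the patch below, both arch_call_rest_init() and smp_start_secondary() are converted to the second pattern.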
Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/include/asm/stacktrace.h   11
-rw-r--r--   arch/s390/kernel/setup.c              9
-rw-r--r--   arch/s390/kernel/smp.c                2
3 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index bb854e33e460..4f3dd1c86c0d 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -124,4 +124,15 @@ struct stack_frame {
 	r2;							\
 })
 
+#define CALL_ON_STACK_NORETURN(fn, stack)				\
+({									\
+	asm volatile(							\
+		"	la	15,0(%[_stack])\n"			\
+		"	xc	%[_bc](8,15),%[_bc](15)\n"		\
+		"	brasl	14,%[_fn]\n"				\
+		::[_bc] "i" (offsetof(struct stack_frame, back_chain)),\
+		  [_stack] "a" (stack), [_fn] "X" (fn));		\
+	BUG();								\
+})
+
 #endif /* _ASM_S390_STACKTRACE_H */
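Reader's annotation (not part of the patch): an annotated copy of the new macro, spelling out what each instruction does. On s390, %r15 is the stack pointer and %r14 receives the return address; xc-ing a field with itself clears it.

    #define CALL_ON_STACK_NORETURN(fn, stack)				\
    ({									\
    	asm volatile(							\
    		/* point %r15 (the stack pointer) at the new stack */	\
    		"	la	15,0(%[_stack])\n"			\
    		/* zero the 8-byte back_chain slot of the new frame */	\
    		"	xc	%[_bc](8,15),%[_bc](15)\n"		\
    		/* call fn; the return address lands in %r14 */		\
    		"	brasl	14,%[_fn]\n"				\
    		::[_bc] "i" (offsetof(struct stack_frame, back_chain)),\
    		  [_stack] "a" (stack), [_fn] "X" (fn));		\
    	BUG();	/* reached only if fn ever returns */			\
    })

The zeroed back_chain is what terminates the unwind at this frame, so the unwinder never follows stale frame pointers into the previous stack.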
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3ff291bc63b7..9cbf490fd162 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -355,7 +355,6 @@ early_initcall(async_stack_realloc);
 
 void __init arch_call_rest_init(void)
 {
-	struct stack_frame *frame;
 	unsigned long stack;
 
 	stack = stack_alloc();
@@ -368,13 +367,7 @@ void __init arch_call_rest_init(void)
 	set_task_stack_end_magic(current);
 	stack += STACK_INIT_OFFSET;
 	S390_lowcore.kernel_stack = stack;
-	frame = (struct stack_frame *) stack;
-	memset(frame, 0, sizeof(*frame));
-	/* Branch to rest_init on the new stack, never returns */
-	asm volatile(
-		"	la	15,0(%[_frame])\n"
-		"	jg	rest_init\n"
-		: : [_frame] "a" (frame));
+	CALL_ON_STACK_NORETURN(rest_init, stack);
 }
 
 static void __init setup_lowcore_dat_off(void)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 06dddd7c4290..2794cad9312e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -876,7 +876,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
 	S390_lowcore.restart_source = -1UL;
 	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
 	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
-	CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
+	CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
 }
 
 /* Upping and downing of CPUs */