From 18c07d23da5a48525b2955aa269b8bb108c19300 Mon Sep 17 00:00:00 2001
From: Guo Ren <guoren@linux.alibaba.com>
Date: Wed, 13 May 2020 15:15:25 +0800
Subject: csky: Fixup calltrace panic

The implementation of show_stack() will panic when it is handed a bad fp:

addr    = *fp++;

because fp is never validated before it is dereferenced.

The current implementations of show_stack, wchan and stack_trace were
never designed properly, so just deprecate them and replace them.
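
As a minimal sketch (the helper name is hypothetical and not part of
this patch), the new walk_stackframe() only follows fp after a bounds
and alignment check along these lines:

	static bool fp_is_valid(unsigned long fp, unsigned long sp)
	{
		unsigned long low  = sp;
		unsigned long high = ALIGN(sp, THREAD_SIZE);

		/* fp must stay inside the current kernel stack, word aligned */
		return fp >= low && fp <= high && !(fp & 0x3);
	}

The same check is open-coded in stacktrace.c below before each frame
is dereferenced.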

This patch follows riscv's approach, with the code adapted from arm's.
The patch has been tested with:

 - cat /proc/<pid>/stack
 - cat /proc/<pid>/wchan
 - echo c > /proc/sysrq-trigger

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
---
 arch/csky/kernel/Makefile     |   2 +-
 arch/csky/kernel/dumpstack.c  |  49 ------------
 arch/csky/kernel/process.c    |  31 --------
 arch/csky/kernel/stacktrace.c | 176 +++++++++++++++++++++++++++++++++---------
 4 files changed, 140 insertions(+), 118 deletions(-)
 delete mode 100644 arch/csky/kernel/dumpstack.c


diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index fd6d9dc8b7f3..37f37c0e934a 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -3,7 +3,7 @@ extra-y := head.o vmlinux.lds
 
 obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
 obj-y += power.o syscall.o syscall_table.o setup.o
-obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
+obj-y += process.o cpu-probe.o ptrace.o stacktrace.o
 obj-y += probes/
 
 obj-$(CONFIG_MODULES)			+= module.o
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
deleted file mode 100644
index d67f9777cfd9..000000000000
--- a/arch/csky/kernel/dumpstack.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-
-#include <linux/ptrace.h>
-
-int kstack_depth_to_print = 48;
-
-void show_trace(unsigned long *stack)
-{
-	unsigned long *stack_end;
-	unsigned long *stack_start;
-	unsigned long *fp;
-	unsigned long addr;
-
-	addr = (unsigned long) stack & THREAD_MASK;
-	stack_start = (unsigned long *) addr;
-	stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
-	fp = stack;
-	pr_info("\nCall Trace:");
-
-	while (fp > stack_start && fp < stack_end) {
-#ifdef CONFIG_STACKTRACE
-		addr	= fp[1];
-		fp	= (unsigned long *) fp[0];
-#else
-		addr	= *fp++;
-#endif
-		if (__kernel_text_address(addr))
-			pr_cont("\n[<%08lx>] %pS", addr, (void *)addr);
-	}
-	pr_cont("\n");
-}
-
-void show_stack(struct task_struct *task, unsigned long *stack)
-{
-	if (!stack) {
-		if (task)
-			stack = (unsigned long *)thread_saved_fp(task);
-		else
-#ifdef CONFIG_STACKTRACE
-			asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
-#else
-			stack = (unsigned long *)&stack;
-#endif
-	}
-
-	show_trace(stack);
-}
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index f7b231ca269a..4ad6db56c3cd 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -98,37 +98,6 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
 	return 1;
 }
 
-unsigned long get_wchan(struct task_struct *p)
-{
-	unsigned long lr;
-	unsigned long *fp, *stack_start, *stack_end;
-	int count = 0;
-
-	if (!p || p == current || p->state == TASK_RUNNING)
-		return 0;
-
-	stack_start = (unsigned long *)end_of_stack(p);
-	stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
-
-	fp = (unsigned long *) thread_saved_fp(p);
-	do {
-		if (fp < stack_start || fp > stack_end)
-			return 0;
-#ifdef CONFIG_STACKTRACE
-		lr = fp[1];
-		fp = (unsigned long *)fp[0];
-#else
-		lr = *fp++;
-#endif
-		if (!in_sched_functions(lr) &&
-		    __kernel_text_address(lr))
-			return lr;
-	} while (count++ < 16);
-
-	return 0;
-}
-EXPORT_SYMBOL(get_wchan);
-
 #ifndef CONFIG_CPU_PM_NONE
 void arch_cpu_idle(void)
 {
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index fec777a643f1..92809e1da723 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -1,57 +1,159 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
 
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 #include <linux/ftrace.h>
+#include <linux/ptrace.h>
 
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+	unsigned long fp;
+	unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+			     bool (*fn)(unsigned long, void *), void *arg)
 {
-	save_stack_trace_tsk(current, trace);
+	unsigned long fp, sp, pc;
+
+	if (regs) {
+		fp = frame_pointer(regs);
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		const register unsigned long current_fp __asm__ ("r8");
+		fp = current_fp;
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
+	} else {
+		/* task blocked in __switch_to */
+		fp = thread_saved_fp(task);
+		sp = thread_saved_sp(task);
+		pc = thread_saved_lr(task);
+	}
+
+	for (;;) {
+		unsigned long low, high;
+		struct stackframe *frame;
+
+		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+			break;
+
+		/* Validate frame pointer */
+		low = sp;
+		high = ALIGN(sp, THREAD_SIZE);
+		if (unlikely(fp < low || fp > high || fp & 0x3))
+			break;
+		/* Unwind stack frame */
+		frame = (struct stackframe *)fp;
+		sp = fp;
+		fp = frame->fp;
+		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+					   (unsigned long *)(fp - 8));
+	}
 }
-EXPORT_SYMBOL_GPL(save_stack_trace);
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+#else /* !CONFIG_FRAME_POINTER */
+
+static void notrace walk_stackframe(struct task_struct *task,
+	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
 {
-	unsigned long *fp, *stack_start, *stack_end;
-	unsigned long addr;
-	int skip = trace->skip;
-	int savesched;
-	int graph_idx = 0;
+	unsigned long sp, pc;
+	unsigned long *ksp;
 
-	if (tsk == current) {
-		asm volatile("mov %0, r8\n":"=r"(fp));
-		savesched = 1;
+	if (regs) {
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
 	} else {
-		fp = (unsigned long *)thread_saved_fp(tsk);
-		savesched = 0;
+		/* task blocked in __switch_to */
+		sp = thread_saved_sp(task);
+		pc = thread_saved_lr(task);
 	}
 
-	addr = (unsigned long) fp & THREAD_MASK;
-	stack_start = (unsigned long *) addr;
-	stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
-	while (fp > stack_start && fp < stack_end) {
-		unsigned long lpp, fpp;
+	if (unlikely(sp & 0x3))
+		return;
 
-		fpp = fp[0];
-		lpp = fp[1];
-		if (!__kernel_text_address(lpp))
+	ksp = (unsigned long *)sp;
+	while (!kstack_end(ksp)) {
+		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
 			break;
-		else
-			lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
-
-		if (savesched || !in_sched_functions(lpp)) {
-			if (skip) {
-				skip--;
-			} else {
-				trace->entries[trace->nr_entries++] = lpp;
-				if (trace->nr_entries >= trace->max_entries)
-					break;
-			}
-		}
-		fp = (unsigned long *)fpp;
+		pc = (*ksp++) - 0x4;
 	}
 }
+#endif /* CONFIG_FRAME_POINTER */
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+	print_ip_sym(pc);
+	return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	pr_cont("Call Trace:\n");
+	walk_stackframe(task, NULL, print_trace_address, NULL);
+}
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+	if (!in_sched_functions(pc)) {
+		unsigned long *p = arg;
+		*p = pc;
+		return true;
+	}
+	return false;
+}
+
+unsigned long get_wchan(struct task_struct *task)
+{
+	unsigned long pc = 0;
+
+	if (likely(task && task != current && task->state != TASK_RUNNING))
+		walk_stackframe(task, NULL, save_wchan, &pc);
+	return pc;
+}
+
+#ifdef CONFIG_STACKTRACE
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+	struct stack_trace *trace = arg;
+
+	if (unlikely(nosched && in_sched_functions(pc)))
+		return false;
+	if (unlikely(trace->skip > 0)) {
+		trace->skip--;
+		return false;
+	}
+
+	trace->entries[trace->nr_entries++] = pc;
+	return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+	return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	walk_stackframe(tsk, NULL, save_trace, trace);
+}
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */