Diffstat (limited to 'arch/riscv/kernel/stacktrace.c')
-rw-r--r--  arch/riscv/kernel/stacktrace.c | 168
1 file changed, 104 insertions(+), 64 deletions(-)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 0940681d2f68..3fe9e6edef8f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -12,27 +12,39 @@
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
+#include <asm/stacktrace.h>
+
#ifdef CONFIG_FRAME_POINTER
-struct stackframe {
- unsigned long fp;
- unsigned long ra;
-};
+extern asmlinkage void handle_exception(void);
+extern unsigned long ret_from_exception_end;
+
+static inline int fp_is_valid(unsigned long fp, unsigned long sp)
+{
+ unsigned long low, high;
+
+ low = sp + sizeof(struct stackframe);
+ high = ALIGN(sp, THREAD_SIZE);
+
+ return !(fp < low || fp > high || fp & 0x07);
+}
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
- bool (*fn)(unsigned long, void *), void *arg)
+ bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
+ int graph_idx = 0;
+ int level = 0;
if (regs) {
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- const register unsigned long current_sp __asm__ ("sp");
fp = (unsigned long)__builtin_frame_address(0);
- sp = current_sp;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
+ level = -1;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
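The first hunk drops the file-local struct stackframe in favour of the shared definition from <asm/stacktrace.h> and factors the frame-pointer sanity test into fp_is_valid(). For reference, the two-word record the check is built around, matching the definition removed above (the shared header is assumed to lay it out the same way):

	struct stackframe {
		unsigned long fp;	/* caller's frame pointer, saved by the callee */
		unsigned long ra;	/* return address back into the caller */
	};

A frame pointer is accepted only if it lies within [sp + sizeof(struct stackframe), ALIGN(sp, THREAD_SIZE)] and is 8-byte aligned; anything else terminates the walk.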
@@ -41,30 +53,42 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
}
for (;;) {
- unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break;
- /* Validate frame pointer */
- low = sp + sizeof(struct stackframe);
- high = ALIGN(sp, THREAD_SIZE);
- if (unlikely(fp < low || fp > high || fp & 0x7))
+ if (unlikely(!fp_is_valid(fp, sp)))
break;
+
/* Unwind stack frame */
frame = (struct stackframe *)fp - 1;
sp = fp;
- fp = frame->fp;
- pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
- (unsigned long *)(fp - 8));
+ if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
+ /* We hit function where ra is not saved on the stack */
+ fp = frame->ra;
+ pc = regs->ra;
+ } else {
+ fp = frame->fp;
+ pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+ &frame->ra);
+ if (pc >= (unsigned long)handle_exception &&
+ pc < (unsigned long)&ret_from_exception_end) {
+ if (unlikely(!fn(arg, pc)))
+ break;
+
+ pc = ((struct pt_regs *)sp)->epc;
+ fp = ((struct pt_regs *)sp)->s0;
+ }
+ }
+
}
}
#else /* !CONFIG_FRAME_POINTER */
-static void notrace walk_stackframe(struct task_struct *task,
- struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long sp, pc;
unsigned long *ksp;
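Both walk_stackframe() variants now take the callback as bool (*fn)(void *arg, unsigned long pc), matching the generic stack_trace_consume_fn, and the return-value sense is inverted: the walk continues while the callback returns true and stops once it returns false. A minimal sketch of a conforming consumer, using hypothetical names (trace_buf, collect_pc) that are not part of this patch:

	struct trace_buf {
		unsigned long *entries;
		unsigned int nr, max;
	};

	static bool collect_pc(void *arg, unsigned long pc)
	{
		struct trace_buf *buf = arg;

		buf->entries[buf->nr++] = pc;
		return buf->nr < buf->max;	/* returning false ends the walk */
	}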
@@ -73,8 +97,7 @@ static void notrace walk_stackframe(struct task_struct *task,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- const register unsigned long current_sp __asm__ ("sp");
- sp = current_sp;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
@@ -87,83 +110,100 @@ static void notrace walk_stackframe(struct task_struct *task,
ksp = (unsigned long *)sp;
while (!kstack_end(ksp)) {
- if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+ if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
break;
- pc = (*ksp++) - 0x4;
+ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
}
}
#endif /* CONFIG_FRAME_POINTER */
-
-static bool print_trace_address(unsigned long pc, void *arg)
+static bool print_trace_address(void *arg, unsigned long pc)
{
- print_ip_sym(pc);
- return false;
+ const char *loglvl = arg;
+
+ print_ip_sym(loglvl, pc);
+ return true;
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+ const char *loglvl)
{
- pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, NULL);
+ walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ pr_cont("%sCall Trace:\n", loglvl);
+ dump_backtrace(NULL, task, loglvl);
+}
-static bool save_wchan(unsigned long pc, void *arg)
+static bool save_wchan(void *arg, unsigned long pc)
{
if (!in_sched_functions(pc)) {
unsigned long *p = arg;
*p = pc;
- return true;
+ return false;
}
- return false;
+ return true;
}
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
- walk_stackframe(task, NULL, save_wchan, &pc);
+ if (!try_get_task_stack(task))
+ return 0;
+ walk_stackframe(task, NULL, save_wchan, &pc);
+ put_task_stack(task);
return pc;
}
-
-#ifdef CONFIG_STACKTRACE
-
-static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
- struct stack_trace *trace = arg;
-
- if (unlikely(nosched && in_sched_functions(pc)))
- return false;
- if (unlikely(trace->skip > 0)) {
- trace->skip--;
- return false;
- }
-
- trace->entries[trace->nr_entries++] = pc;
- return (trace->nr_entries >= trace->max_entries);
-}
-
-static bool save_trace(unsigned long pc, void *arg)
-{
- return __save_trace(pc, arg, false);
+ walk_stackframe(task, regs, consume_entry, cookie);
}
/*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
*/
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
{
- walk_stackframe(tsk, NULL, save_trace, trace);
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+ if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-void save_stack_trace(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
{
- save_stack_trace_tsk(NULL, trace);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
+ unsigned long fp = 0;
-#endif /* CONFIG_STACKTRACE */
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
+
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x7))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
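With arch_stack_walk() and arch_stack_walk_user() providing the architecture hooks, callers go through the generic helpers in kernel/stacktrace.c rather than the removed save_stack_trace*() API. A hedged usage sketch, assuming CONFIG_ARCH_STACKWALK and, for the user-space walk, CONFIG_USER_STACKTRACE_SUPPORT; example_dump_traces() is illustrative only:

	#include <linux/kernel.h>
	#include <linux/printk.h>
	#include <linux/stacktrace.h>

	static void example_dump_traces(void)
	{
		unsigned long entries[32];
		unsigned int i, nr;

		/* Kernel backtrace of current; this reaches arch_stack_walk(). */
		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
		for (i = 0; i < nr; i++)
			pr_info("  %pS\n", (void *)entries[i]);

		/* User backtrace of current; this reaches arch_stack_walk_user(). */
		nr = stack_trace_save_user(entries, ARRAY_SIZE(entries));
		for (i = 0; i < nr; i++)
			pr_info("  user pc %lx\n", entries[i]);
	}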