Diffstat (limited to 'arch/riscv/kernel/stacktrace.c')
-rw-r--r--	arch/riscv/kernel/stacktrace.c	213
1 file changed, 128 insertions(+), 85 deletions(-)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index a4b1d94371a0..b41b6255751c 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 ARM Limited
* Copyright (C) 2014 Regents of the University of California
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/export.h>
@@ -20,27 +12,55 @@
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
+#include <asm/stacktrace.h>
+
#ifdef CONFIG_FRAME_POINTER
-struct stackframe {
- unsigned long fp;
- unsigned long ra;
-};
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x)	\
+({	\
+	unsigned long val;	\
+	if ((task) == current)	\
+		val = READ_ONCE(x);	\
+	else	\
+		val = READ_ONCE_NOCHECK(x);	\
+	val;	\
+})
+
+extern asmlinkage void handle_exception(void);
+extern unsigned long ret_from_exception_end;
+
+static inline int fp_is_valid(unsigned long fp, unsigned long sp)
+{
+ unsigned long low, high;
+
+ low = sp + sizeof(struct stackframe);
+ high = ALIGN(sp, THREAD_SIZE);
+
+ return !(fp < low || fp > high || fp & 0x07);
+}
-static void notrace walk_stackframe(struct task_struct *task,
- struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
+ int graph_idx = 0;
+ int level = 0;
if (regs) {
- fp = GET_FP(regs);
- sp = GET_USP(regs);
- pc = GET_IP(regs);
+ fp = frame_pointer(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- const register unsigned long current_sp __asm__ ("sp");
fp = (unsigned long)__builtin_frame_address(0);
- sp = current_sp;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
+ level = -1;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -49,44 +69,52 @@ static void notrace walk_stackframe(struct task_struct *task,
}
for (;;) {
- unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break;
- /* Validate frame pointer */
- low = sp + sizeof(struct stackframe);
- high = ALIGN(sp, THREAD_SIZE);
- if (unlikely(fp < low || fp > high || fp & 0x7))
+ if (unlikely(!fp_is_valid(fp, sp)))
break;
+
/* Unwind stack frame */
frame = (struct stackframe *)fp - 1;
sp = fp;
- fp = frame->fp;
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
- pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
- (unsigned long *)(fp - 8));
-#else
- pc = frame->ra - 0x4;
-#endif
+ if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
+			/* We hit a function where ra is not saved on the stack */
+ fp = frame->ra;
+ pc = regs->ra;
+ } else {
+ fp = READ_ONCE_TASK_STACK(task, frame->fp);
+ pc = READ_ONCE_TASK_STACK(task, frame->ra);
+ pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
+ &frame->ra);
+ if (pc >= (unsigned long)handle_exception &&
+ pc < (unsigned long)&ret_from_exception_end) {
+ if (unlikely(!fn(arg, pc)))
+ break;
+
+ pc = ((struct pt_regs *)sp)->epc;
+ fp = ((struct pt_regs *)sp)->s0;
+ }
+ }
+
}
}
#else /* !CONFIG_FRAME_POINTER */
-static void notrace walk_stackframe(struct task_struct *task,
- struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long sp, pc;
unsigned long *ksp;
if (regs) {
- sp = GET_USP(regs);
- pc = GET_IP(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- const register unsigned long current_sp __asm__ ("sp");
- sp = current_sp;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
@@ -99,85 +127,100 @@ static void notrace walk_stackframe(struct task_struct *task,
ksp = (unsigned long *)sp;
while (!kstack_end(ksp)) {
- if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+ if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
break;
- pc = (*ksp++) - 0x4;
+ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
}
}
#endif /* CONFIG_FRAME_POINTER */
-
-static bool print_trace_address(unsigned long pc, void *arg)
+static bool print_trace_address(void *arg, unsigned long pc)
{
- print_ip_sym(pc);
- return false;
+ const char *loglvl = arg;
+
+ print_ip_sym(loglvl, pc);
+ return true;
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+ const char *loglvl)
{
- pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, NULL);
+ walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ pr_cont("%sCall Trace:\n", loglvl);
+ dump_backtrace(NULL, task, loglvl);
+}
-static bool save_wchan(unsigned long pc, void *arg)
+static bool save_wchan(void *arg, unsigned long pc)
{
if (!in_sched_functions(pc)) {
unsigned long *p = arg;
*p = pc;
- return true;
+ return false;
}
- return false;
+ return true;
}
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
- walk_stackframe(task, NULL, save_wchan, &pc);
+ if (!try_get_task_stack(task))
+ return 0;
+ walk_stackframe(task, NULL, save_wchan, &pc);
+ put_task_stack(task);
return pc;
}
-
-#ifdef CONFIG_STACKTRACE
-
-static bool __save_trace(unsigned long pc, void *arg, bool nosched)
-{
- struct stack_trace *trace = arg;
-
- if (unlikely(nosched && in_sched_functions(pc)))
- return false;
- if (unlikely(trace->skip > 0)) {
- trace->skip--;
- return false;
- }
-
- trace->entries[trace->nr_entries++] = pc;
- return (trace->nr_entries >= trace->max_entries);
-}
-
-static bool save_trace(unsigned long pc, void *arg)
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
- return __save_trace(pc, arg, false);
+ walk_stackframe(task, regs, consume_entry, cookie);
}
/*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
*/
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
{
- walk_stackframe(tsk, NULL, save_trace, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+ if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-void save_stack_trace(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
{
- save_stack_trace_tsk(NULL, trace);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
+ unsigned long fp = 0;
+
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
-#endif /* CONFIG_STACKTRACE */
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x7))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
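
Note on the reworked callback contract: after this change the unwinder callback takes (cookie, pc) and returns true to continue unwinding or false to stop, matching the generic stack_trace_consume_fn consumed by arch_stack_walk(). The sketch below is illustrative only and not part of the patch; the pc_buf type, collect_pc() helper, and example_capture() caller are assumed names invented for the example.

/*
 * Illustrative consumer: record up to 16 return addresses from the
 * current task's kernel stack via arch_stack_walk().
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

struct pc_buf {
	unsigned long entries[16];
	unsigned int nr;
};

/* Matches stack_trace_consume_fn: return true to keep unwinding. */
static bool collect_pc(void *cookie, unsigned long pc)
{
	struct pc_buf *buf = cookie;

	if (buf->nr >= ARRAY_SIZE(buf->entries))
		return false;		/* buffer full: stop the walk */
	buf->entries[buf->nr++] = pc;
	return true;			/* keep unwinding */
}

static void example_capture(void)
{
	struct pc_buf buf = { .nr = 0 };

	/* task == current, regs == NULL: walk the current kernel stack */
	arch_stack_walk(collect_pc, &buf, current, NULL);
}

In practice most callers go through stack_trace_save() or stack_trace_save_tsk(), which wrap arch_stack_walk() with the same consume-entry convention.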