Diffstat:
 arch/riscv/kernel/stacktrace.c | 72 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 67 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 10e311b2759d..b41b6255751c 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -16,7 +16,24 @@
#ifdef CONFIG_FRAME_POINTER

-extern asmlinkage void ret_from_exception(void);
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ unsigned long addr = x; \
+ if ((task) == current) \
+ val = READ_ONCE(addr); \
+ else \
+ val = READ_ONCE_NOCHECK(addr); \
+ val; \
+})
+
+extern asmlinkage void handle_exception(void);
+extern unsigned long ret_from_exception_end;

static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{
	unsigned long low, high;
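
For reference, the frame record this unwinder walks is the two-word struct from arch/riscv/include/asm/stacktrace.h, and walk_stackframe() derives it as (struct stackframe *)fp - 1, i.e. the record sits immediately below the address held in s0/fp. A minimal sketch of one unwind step using the new macro (illustrative only; unwind_next() is not a function from this patch):

	/* Frame record layout, per arch/riscv/include/asm/stacktrace.h */
	struct stackframe {
		unsigned long fp;	/* caller's frame pointer */
		unsigned long ra;	/* return address into the caller */
	};

	/*
	 * One unwind step: read the record just below 'fp', using the
	 * macro so reads of another task's stack skip KASAN checking.
	 */
	static void unwind_next(struct task_struct *task, unsigned long *fp,
				unsigned long *pc)
	{
		struct stackframe *frame = (struct stackframe *)*fp - 1;

		*fp = READ_ONCE_TASK_STACK(task, frame->fp);
		*pc = READ_ONCE_TASK_STACK(task, frame->ra);
	}
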
@@ -68,11 +85,13 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame->ra;
pc = regs->ra;
} else {
- fp = frame->fp;
- pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+ fp = READ_ONCE_TASK_STACK(task, frame->fp);
+ pc = READ_ONCE_TASK_STACK(task, frame->ra);
+ pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
&frame->ra);
- if (pc == (unsigned long)ret_from_exception) {
- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ if (pc >= (unsigned long)handle_exception &&
+ pc < (unsigned long)&ret_from_exception_end) {
+ if (unlikely(!fn(arg, pc)))
break;

pc = ((struct pt_regs *)sp)->epc;
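
The range check above relies on the exception-entry code between handle_exception and ret_from_exception_end keeping a full struct pt_regs at the stack pointer. In isolation, the hop across such an exception frame looks roughly like this (a sketch under that assumption; cross_exception_frame() is not code from the patch):

	/*
	 * If 'pc' landed anywhere inside the exception-entry text, the
	 * words at 'sp' are a struct pt_regs rather than a normal frame
	 * record, so resume unwinding from the pc the trap interrupted.
	 */
	static unsigned long cross_exception_frame(unsigned long pc, unsigned long sp)
	{
		if (pc >= (unsigned long)handle_exception &&
		    pc < (unsigned long)&ret_from_exception_end)
			return ((struct pt_regs *)sp)->epc;

		return pc;	/* an ordinary call frame */
	}
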
@@ -162,3 +181,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void
{
walk_stackframe(task, regs, consume_entry, cookie);
}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
+{
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+ if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
+
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x7))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
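
The new arch_stack_walk_user() is driven through a stack_trace_consume_fn callback (typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr) in include/linux/stacktrace.h): the walk continues while the callback returns true. A minimal sketch of a caller; the utrace_* names are hypothetical, not part of this patch:

	#include <linux/stacktrace.h>

	struct utrace_buf {
		unsigned long *entries;
		unsigned int nr, max;
	};

	/* Store each user return address; returning false stops the walk. */
	static bool utrace_consume(void *cookie, unsigned long addr)
	{
		struct utrace_buf *buf = cookie;

		if (buf->nr >= buf->max)
			return false;
		buf->entries[buf->nr++] = addr;
		return true;
	}

	/* Capture up to 'max' user frames for the task that took 'regs'. */
	static unsigned int utrace_capture(unsigned long *entries, unsigned int max,
					   const struct pt_regs *regs)
	{
		struct utrace_buf buf = { .entries = entries, .max = max };

		arch_stack_walk_user(utrace_consume, &buf, regs);
		return buf.nr;
	}
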