Diffstat (limited to 'arch/riscv/kernel/stacktrace.c')
-rw-r--r--  arch/riscv/kernel/stacktrace.c  128
1 file changed, 103 insertions(+), 25 deletions(-)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index ff467b98c3e3..b41b6255751c 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -14,23 +14,53 @@
#include <asm/stacktrace.h>
-register unsigned long sp_in_global __asm__("sp");
-
#ifdef CONFIG_FRAME_POINTER
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ unsigned long addr = x; \
+ if ((task) == current) \
+ val = READ_ONCE(addr); \
+ else \
+ val = READ_ONCE_NOCHECK(addr); \
+ val; \
+})
+
+extern asmlinkage void handle_exception(void);
+extern unsigned long ret_from_exception_end;
+
+static inline int fp_is_valid(unsigned long fp, unsigned long sp)
+{
+ unsigned long low, high;
+
+ low = sp + sizeof(struct stackframe);
+ high = ALIGN(sp, THREAD_SIZE);
+
+ return !(fp < low || fp > high || fp & 0x07);
+}
+
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
+ int graph_idx = 0;
+ int level = 0;
if (regs) {
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
- } else if (task == current) {
- fp = (unsigned long)__builtin_frame_address(1);
- sp = (unsigned long)__builtin_frame_address(0);
- pc = (unsigned long)__builtin_return_address(0);
+ } else if (task == NULL || task == current) {
+ fp = (unsigned long)__builtin_frame_address(0);
+ sp = current_stack_pointer;
+ pc = (unsigned long)walk_stackframe;
+ level = -1;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
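
The fp_is_valid() helper added above factors out the sanity check the old code did inline in the unwind loop: a candidate frame pointer must lie at least one frame record above the current stack pointer, must not run past the top of the THREAD_SIZE-aligned kernel stack, and must keep the ABI's 8-byte alignment. A minimal user-space sketch of the same check, with THREAD_SIZE and the two-word frame record assumed purely for illustration:

#include <stdio.h>

/* Illustrative stand-ins; the kernel gets THREAD_SIZE and struct stackframe
   from asm/thread_info.h and asm/stacktrace.h. */
#define THREAD_SIZE     (16 * 1024UL)
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

struct stackframe {
        unsigned long fp;       /* caller's frame pointer */
        unsigned long ra;       /* return address into the caller */
};

static int fp_is_valid(unsigned long fp, unsigned long sp)
{
        unsigned long low  = sp + sizeof(struct stackframe);
        unsigned long high = ALIGN(sp, THREAD_SIZE);    /* top of this stack */

        /* fp must point between the current frame and the stack top and
           must stay 8-byte aligned. */
        return !(fp < low || fp > high || fp & 0x07);
}

int main(void)
{
        unsigned long sp = 0x4100;

        printf("%d\n", fp_is_valid(sp + 64, sp));       /* plausible frame -> 1 */
        printf("%d\n", fp_is_valid(sp + 3, sp));        /* too low, misaligned -> 0 */
        return 0;
}
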
@@ -39,27 +69,34 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
}
for (;;) {
- unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break;
- /* Validate frame pointer */
- low = sp + sizeof(struct stackframe);
- high = ALIGN(sp, THREAD_SIZE);
- if (unlikely(fp < low || fp > high || fp & 0x7))
+ if (unlikely(!fp_is_valid(fp, sp)))
break;
+
/* Unwind stack frame */
frame = (struct stackframe *)fp - 1;
sp = fp;
- if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
+ if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
+ /* We hit a function where ra is not saved on the stack */
fp = frame->ra;
pc = regs->ra;
} else {
- fp = frame->fp;
- pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
- (unsigned long *)(fp - 8));
+ fp = READ_ONCE_TASK_STACK(task, frame->fp);
+ pc = READ_ONCE_TASK_STACK(task, frame->ra);
+ pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
+ &frame->ra);
+ if (pc >= (unsigned long)handle_exception &&
+ pc < (unsigned long)&ret_from_exception_end) {
+ if (unlikely(!fn(arg, pc)))
+ break;
+
+ pc = ((struct pt_regs *)sp)->epc;
+ fp = ((struct pt_regs *)sp)->s0;
+ }
}
}
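
The loop above leans on the RISC-V frame layout: the frame pointer (s0) points just past a two-word record holding the caller's fp and the return address, hence frame = (struct stackframe *)fp - 1. This hunk also makes the walker start from its own frame (level = -1 suppresses reporting walk_stackframe itself), reads frame->fp/frame->ra from another task's stack through READ_ONCE_TASK_STACK() so KASAN is not tripped by a stack the other CPU may have re-poisoned, and, when a return address falls inside handle_exception..ret_from_exception_end, re-seeds pc and fp from the struct pt_regs sitting at that frame so the trace continues into the interrupted context. The following self-contained sketch simulates only the basic frame-record walk over a synthetic stack; the layout and the simplified termination rules are assumptions for illustration, and the ftrace and exception cases are omitted:

#include <stdio.h>

struct stackframe {
        unsigned long fp;       /* caller's frame pointer */
        unsigned long ra;       /* return address in the caller */
};

static void walk(unsigned long fp, unsigned long sp, unsigned long stack_top)
{
        for (;;) {
                struct stackframe *frame;

                /* Same shape of validity check as fp_is_valid() */
                if (fp < sp + sizeof(struct stackframe) || fp > stack_top ||
                    (fp & 0x07))
                        break;

                frame = (struct stackframe *)fp - 1;    /* record sits just below fp */
                printf("ra = %#lx\n", frame->ra);       /* report the return address */

                sp = fp;                                /* unwind one frame */
                fp = frame->fp;
        }
}

int main(void)
{
        /* Synthetic "stack" holding three nested frame records. */
        unsigned long stack[32] = { 0 };
        unsigned long top = (unsigned long)stack + sizeof(stack);
        struct stackframe *outer = (struct stackframe *)top - 1;
        struct stackframe *mid   = outer - 4;
        struct stackframe *inner = mid - 4;

        outer->fp = 0;                          /* no caller left: ends the walk */
        outer->ra = 0x80201234;
        mid->fp   = (unsigned long)(outer + 1);
        mid->ra   = 0x80205678;
        inner->fp = (unsigned long)(mid + 1);
        inner->ra = 0x80209abc;

        walk((unsigned long)(inner + 1), (unsigned long)stack, top);
        return 0;
}
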
@@ -77,7 +114,7 @@ void notrace walk_stackframe(struct task_struct *task,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- sp = sp_in_global;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
@@ -92,7 +129,7 @@ void notrace walk_stackframe(struct task_struct *task,
while (!kstack_end(ksp)) {
if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
break;
- pc = (*ksp++) - 0x4;
+ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
}
}
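
Without frame pointers the fallback walker above can only guess: it scans every word on the kernel stack and reports the values that look like kernel text addresses, subtracting 4 because a saved ra points at the instruction after the call; READ_ONCE_NOCHECK() keeps KASAN quiet since the scan may touch stale or unrelated slots. A rough user-space sketch of the same scan, with the text-address test stubbed out for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for __kernel_text_address(): anything in an assumed text window. */
static bool looks_like_text(unsigned long pc)
{
        return pc >= 0x80002000UL && pc < 0x80800000UL;
}

/* Scan every stack slot and report the ones that could be return addresses. */
static void scan_stack(const unsigned long *ksp, const unsigned long *end)
{
        while (ksp < end) {
                unsigned long pc = *ksp++ - 0x4;        /* ra points after the call */

                if (looks_like_text(pc))
                        printf("maybe: %#lx\n", pc);
        }
}

int main(void)
{
        /* Synthetic stack contents: data mixed with plausible return addresses. */
        unsigned long stack[] = { 0xdeadbeef, 0x80004a38, 42, 0x800137c4, 0 };

        scan_stack(stack, stack + sizeof(stack) / sizeof(stack[0]));
        return 0;
}
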
@@ -128,21 +165,62 @@ static bool save_wchan(void *arg, unsigned long pc)
return true;
}
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && !task_is_running(task)))
- walk_stackframe(task, NULL, save_wchan, &pc);
+ if (!try_get_task_stack(task))
+ return 0;
+ walk_stackframe(task, NULL, save_wchan, &pc);
+ put_task_stack(task);
return pc;
}
-#ifdef CONFIG_STACKTRACE
-
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
}
-#endif /* CONFIG_STACKTRACE */
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
+{
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+ if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
+
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x7))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
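
arch_stack_walk_user() applies the same frame-record convention to the user stack: it reports regs->epc first, then copies one struct stackframe at a time from user memory behind access_ok()/__copy_from_user_inatomic(), takes regs->ra for the first hop because a leaf function may not have spilled ra yet, and stops on a zero or misaligned fp. A user-space approximation is sketched below; the bounds-checked copy stands in for the guarded user-memory read, and all names and values are assumptions for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct stackframe {
        unsigned long fp;
        unsigned long ra;
};

/* Stand-in for access_ok() + __copy_from_user_inatomic(): only reads that stay
   inside the fake "user stack" buffer succeed. */
static bool copy_from_stack(void *dst, const unsigned char *stack, size_t size,
                            unsigned long off, size_t len)
{
        if (off > size || len > size - off)
                return false;
        memcpy(dst, stack + off, len);
        return true;
}

/* One unwind step: read the record just below fp, report ra (or the live
   register value for the first frame), return the caller's fp. */
static unsigned long unwind_one(const unsigned char *stack, size_t size,
                                unsigned long fp, unsigned long reg_ra)
{
        struct stackframe buf;
        unsigned long ra;

        if (fp < sizeof(buf) ||
            !copy_from_stack(&buf, stack, size, fp - sizeof(buf), sizeof(buf)))
                return 0;

        ra = reg_ra ? reg_ra : buf.ra;
        if (!ra)
                return 0;

        printf("user ra = %#lx\n", ra);
        return buf.fp;
}

int main(void)
{
        /* Fake user stack with two frame records; buffer offsets play the role
           of user-space addresses. */
        unsigned char ustack[256] = { 0 };
        struct stackframe caller = { .fp = 0, .ra = 0x1ac24 };
        struct stackframe leaf   = { .fp = 128, .ra = 0 };      /* leaf: ra still live in a register */
        unsigned long fp = 64;

        memcpy(ustack + 128 - sizeof(caller), &caller, sizeof(caller));
        memcpy(ustack + 64 - sizeof(leaf), &leaf, sizeof(leaf));

        fp = unwind_one(ustack, sizeof(ustack), fp, 0x10a2c);   /* first hop uses regs->ra */
        while (fp && !(fp & 0x7))
                fp = unwind_one(ustack, sizeof(ustack), fp, 0);
        return 0;
}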