summaryrefslogtreecommitdiff
path: root/arch/loongarch/kernel/stacktrace.c
blob: 9a038d1070d73b1efcf195ea7ba02e574bca104a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>
#include <asm/unwind.h>

/*
 * Walk a kernel stack, feeding each return address to @consume_entry.
 *
 * @consume_entry: callback invoked per frame; returning false stops the walk
 * @cookie:        opaque state passed through to @consume_entry
 * @task:          task whose stack is walked
 * @regs:          starting register state, or NULL to synthesize one
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	struct pt_regs fake_regs;

	/* No register snapshot supplied: build one for the unwinder. */
	if (!regs) {
		regs = &fake_regs;

		if (task != current) {
			/* Sleeping task: resume from its saved fp/ra. */
			regs->regs[3] = thread_saved_fp(task);
			regs->csr_era = thread_saved_ra(task);
		} else {
			/* Current task: start at this very call site. */
			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
			regs->csr_era = (unsigned long)__builtin_return_address(0);
		}
		regs->regs[1] = 0;	/* ra */
		regs->regs[22] = 0;	/* fp */
	}

	unwind_start(&state, task, regs);
	while (!unwind_done(&state)) {
		unsigned long pc = unwind_get_return_address(&state);

		if (!pc || !consume_entry(cookie, pc))
			return;
		unwind_next_frame(&state);
	}
}

/*
 * Reliable variant of arch_stack_walk(): walk @task's stack and feed
 * each return address to @consume_entry, failing loudly instead of
 * silently truncating.
 *
 * Returns 0 if the whole stack was unwound cleanly, -EINVAL if a frame
 * had no valid return address, the consumer rejected an entry, or the
 * unwinder flagged an error (stack corruption).
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs fake_regs;
	struct pt_regs *regs = &fake_regs;

	if (task != current) {
		/* Sleeping task: resume from its saved fp/ra. */
		regs->regs[3] = thread_saved_fp(task);
		regs->csr_era = thread_saved_ra(task);
	} else {
		/* Current task: start at this very call site. */
		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
		regs->csr_era = (unsigned long)__builtin_return_address(0);
	}
	regs->regs[1] = 0;	/* ra */
	regs->regs[22] = 0;	/* fp */

	unwind_start(&state, task, regs);
	while (!unwind_done(&state) && !unwind_error(&state)) {
		unsigned long pc = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's
		 * some generated code which __kernel_text_address() doesn't
		 * know about.
		 */
		if (!pc)
			return -EINVAL;

		if (!consume_entry(cookie, pc))
			return -EINVAL;

		unwind_next_frame(&state);
	}

	/* Check for stack corruption reported by the unwinder. */
	return unwind_error(&state) ? -EINVAL : 0;
}

static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
	int ret = 1;
	unsigned long err;
	unsigned long __user *user_frame_tail;

	user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
	if (!access_ok(user_frame_tail, sizeof(*frame)))
		return 0;

	pagefault_disable();
	err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
	if (err || (unsigned long)user_frame_tail >= frame->fp)
		ret = 0;
	pagefault_enable();

	return ret;
}

/*
 * Walk a user-space stack by following the frame-pointer chain rooted
 * at the task's fp register ($r22), feeding each saved return address
 * to @consume_entry.
 *
 * The walk stops on a NULL or misaligned frame pointer, a failed frame
 * copy, a NULL return address, or when the consumer returns false.
 */
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	unsigned long fp = regs->regs[22];

	/* A frame pointer must be non-NULL and 16-byte aligned. */
	while (fp && !(fp & 0xf)) {
		struct stack_frame frame = { .fp = 0, .ra = 0 };

		if (!copy_stack_frame(fp, &frame))
			break;
		if (!frame.ra || !consume_entry(cookie, frame.ra))
			break;
		fp = frame.fp;
	}
}