path: root/arch/x86/kernel/dumpstack_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/sched/debug.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>

static const char * const exception_stack_names[] = {
		[ ESTACK_DF	]	= "#DF",
		[ ESTACK_NMI	]	= "NMI",
		[ ESTACK_DB	]	= "#DB",
		[ ESTACK_MCE	]	= "#MC",
		[ ESTACK_VC	]	= "#VC",
		[ ESTACK_VC2	]	= "#VC2",
};

const char *stack_type_name(enum stack_type type)
{
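	/*
	 * If N_EXCEPTION_STACKS changes, this fires at build time so that
	 * the exception_stack_names[] table above is revisited as well.
	 */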
	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);

	if (type == STACK_TYPE_IRQ)
		return "IRQ";

	if (type == STACK_TYPE_ENTRY) {
		/*
		 * On 64-bit, we have a generic entry stack that we
		 * use for all the kernel entry points, including
		 * SYSENTER.
		 */
		return "ENTRY_TRAMPOLINE";
	}

	if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
		return exception_stack_names[type - STACK_TYPE_EXCEPTION];

	return NULL;
}

/**
 * struct estack_pages - Page descriptor for exception stacks
 * @offs:	Offset from the start of the exception stack area
 * @size:	Size of the exception stack
 * @type:	Type to store in the stack_info struct
 */
struct estack_pages {
	u32	offs;
	u16	size;
	u16	type;
};

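/*
 * EPAGERANGE(st) uses a designated range initializer to make every array
 * slot covering a page of exception stack 'st' carry the same descriptor:
 * the stack's offset, size and type.
 */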
#define EPAGERANGE(st)							\
	[PFN_DOWN(CEA_ESTACK_OFFS(st)) ...				\
	 PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = {	\
		.offs	= CEA_ESTACK_OFFS(st),				\
		.size	= CEA_ESTACK_SIZE(st),				\
		.type	= STACK_TYPE_EXCEPTION + ESTACK_ ##st, }

/*
 * Array of exception stack page descriptors. If the stack is larger than
 * PAGE_SIZE, all pages covering a particular stack will have the same
 * info. Descriptors covering the guard pages are left zeroed out, so a
 * zero size identifies an address that is not on any exception stack.
 */
static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
	EPAGERANGE(DF),
	EPAGERANGE(NMI),
	EPAGERANGE(DB),
	EPAGERANGE(MCE),
	EPAGERANGE(VC),
	EPAGERANGE(VC2),
};

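/*
 * Check whether @stack points into one of the per-CPU exception stacks.
 * Instead of walking the individual stacks, the page index of @stack
 * within the exception stack area is used to look up the matching
 * descriptor in estack_pages directly.
 */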
static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
	unsigned long begin, end, stk = (unsigned long)stack;
	const struct estack_pages *ep;
	struct pt_regs *regs;
	unsigned int k;

	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);

	begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
	/*
	 * Handle the case where a stack trace is collected _before_
	 * cea_exception_stacks has been initialized.
	 */
	if (!begin)
		return false;

	end = begin + sizeof(struct cea_exception_stacks);
	/* Bail if @stack is outside the exception stack area. */
	if (stk < begin || stk >= end)
		return false;

	/* Compute the page index from the start of the exception stacks */
	k = (stk - begin) >> PAGE_SHIFT;
	/* Lookup the page descriptor */
	ep = &estack_pages[k];
	/* Guard page? */
	if (!ep->size)
		return false;

	begin += (unsigned long)ep->offs;
	end = begin + (unsigned long)ep->size;
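	/*
	 * The exception entry code stores a complete pt_regs frame at the
	 * top of each exception stack; its ->sp member is the stack pointer
	 * of the interrupted context, i.e. the next stack to unwind.
	 */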
	regs = (struct pt_regs *)end - 1;

	info->type	= ep->type;
	info->begin	= (unsigned long *)begin;
	info->end	= (unsigned long *)end;
	info->next_sp	= (unsigned long *)regs->sp;
	return true;
}

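/*
 * Check whether @stack points into this CPU's hardirq stack.
 */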
static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
	unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
	unsigned long *begin;

	/*
	 * @end points directly to the topmost stack entry to avoid a -8
	 * adjustment in the stack switch hotpath. Adjust it back before
	 * calculating @begin.
	 */
	end++;
	begin = end - (IRQ_STACK_SIZE / sizeof(long));

	/*
	 * Due to the switching logic, RSP can never be equal to @end: the
	 * final operation is 'popq %rsp', after which RSP points to the
	 * original stack rather than to @end.
	 */
	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_IRQ;
	info->begin	= begin;
	info->end	= end;

	/*
	 * The next stack pointer is stored at the top of the irq stack
	 * before switching to the irq stack. Actual stack entries are all
	 * below that.
	 */
	info->next_sp = (unsigned long *)*(end - 1);

	return true;
}

bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
				    struct stack_info *info)
{
	if (in_task_stack(stack, task, info))
		return true;

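	/*
	 * The IRQ, exception and entry stack checks below read per-CPU data
	 * of this CPU, which is only meaningful for the task currently
	 * running on it.
	 */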
	if (task != current)
		return false;

	if (in_exception_stack(stack, info))
		return true;

	if (in_irq_stack(stack, info))
		return true;

	if (in_entry_stack(stack, info))
		return true;

	return false;
}

int get_stack_info(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask)
{
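	/* A NULL @task means the current task. */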
	task = task ? : current;

	if (!stack)
		goto unknown;

	if (!get_stack_info_noinstr(stack, task, info))
		goto unknown;

	/*
	 * Make sure we don't iterate through any given stack more than once.
	 * If the same stack type comes up a second time, something has gone
	 * wrong: just break out and report an unknown stack type.
	 */
	if (visit_mask) {
		if (*visit_mask & (1UL << info->type)) {
			if (task == current)
				printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
			goto unknown;
		}
		*visit_mask |= 1UL << info->type;
	}

	return 0;

unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}