path: root/arch/arm64/include/asm/stacktrace/nvhe.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */
#ifndef __ASM_STACKTRACE_NVHE_H
#define __ASM_STACKTRACE_NVHE_H

#include <asm/stacktrace/common.h>

/*
 * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
 *
 * @state : unwind_state to initialize
 * @fp    : frame pointer at which to start the unwinding.
 * @pc    : program counter at which to start the unwinding.
 */
static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
					unsigned long fp,
					unsigned long pc)
{
	unwind_init_common(state, NULL);

	state->fp = fp;
	state->pc = pc;
}
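
/*
 * Example (illustrative sketch, not taken from the kernel sources): a
 * host-side caller would typically seed the unwinder from the HYP fp/pc
 * shared by the hypervisor and then walk the frames with unwind_next().
 * The stacktrace_info and consume_pc names below are assumptions used
 * only for illustration.
 *
 *	struct unwind_state state;
 *
 *	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
 *	do {
 *		consume_pc(state.pc);
 *	} while (unwind_next(&state) == 0);
 */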

static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
				struct stack_info *info);

static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	if (on_accessible_stack_common(tsk, sp, size, info))
		return true;

	if (on_hyp_stack(sp, size, info))
		return true;

	return false;
}

#ifdef __KVM_NVHE_HYPERVISOR__
/*
 * Protected nVHE HYP stack unwinder
 *
 * In protected mode, the unwinding is done by the hypervisor in EL2.
 */

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}

static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
	unsigned long high = params->stack_hyp_va;
	unsigned long low = high - PAGE_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

static inline int notrace unwind_next(struct unwind_state *state)
{
	struct stack_info info;

	return unwind_next_common(state, &info, NULL);
}
NOKPROBE_SYMBOL(unwind_next);
#endif	/* CONFIG_PROTECTED_NVHE_STACKTRACE */

#else	/* !__KVM_NVHE_HYPERVISOR__ */
/*
 * Conventional (non-protected) nVHE HYP stack unwinder
 *
 * In non-protected mode, the unwinding is done from kernel proper context
 * (by the host in EL1).
 */

DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/*
 * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */
static inline bool kvm_nvhe_stack_kern_va(unsigned long *addr,
					  enum stack_type type)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	unsigned long hyp_base, kern_base, hyp_offset;

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	switch (type) {
	case STACK_TYPE_HYP:
		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
		hyp_base = (unsigned long)stacktrace_info->stack_base;
		break;
	case STACK_TYPE_OVERFLOW:
		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
		break;
	default:
		return false;
	}

	hyp_offset = *addr - hyp_base;

	*addr = kern_base + hyp_offset;

	return true;
}
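
/*
 * Worked example with made-up addresses (illustration only): if
 * stacktrace_info->stack_base is 0xffff800008010000, kvm_arm_hyp_stack_page
 * holds 0xffff000012340000 and *addr is 0xffff800008010f70, then
 * hyp_offset = 0xf70 and *addr is rewritten to 0xffff000012340f70.
 */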

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}

static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->stack_base;
	unsigned long high = low + PAGE_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

static inline int notrace unwind_next(struct unwind_state *state)
{
	struct stack_info info;

	return unwind_next_common(state, &info, kvm_nvhe_stack_kern_va);
}
NOKPROBE_SYMBOL(unwind_next);

void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
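
/*
 * Sketch of the intended call site (an assumption for illustration, not a
 * definition from this header): after a hypervisor panic, the host error
 * path would pass the offset needed to translate HYP text addresses back
 * to kernel VAs so the dumped PCs can be symbolised, e.g.:
 *
 *	kvm_nvhe_dump_backtrace(hyp_offset);
 */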

#endif	/* __KVM_NVHE_HYPERVISOR__ */
#endif	/* __ASM_STACKTRACE_NVHE_H */