summaryrefslogtreecommitdiff
path: root/arch/arm64/kvm/stacktrace.c
blob: 9812aefdcfb49416142e1d03b3e9e12befe38cb3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>

/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg    : the hypervisor offset, used for address translation
 * @where  : the program counter corresponding to the stack frame
 *
 * Always returns true so the unwinder continues to the next frame.
 */
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
	unsigned long hyp_offset = (unsigned long)arg;
	unsigned long mask = GENMASK_ULL(vabits_actual - 1, 0);
	unsigned long kern_addr;

	/* Mask off tag bits and convert to a kernel address */
	kern_addr = (where & mask) + hyp_offset;
	kvm_err(" [<%016lx>] %pB\n", kern_addr,
		(void *)(kern_addr + kaslr_offset()));

	return true;
}

/* Print the header line that opens an nVHE backtrace dump */
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}

/* Print the trailer line that closes an nVHE backtrace dump */
static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}

/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	struct unwind_state state;

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

	kvm_nvhe_dump_backtrace_start();
	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
	kvm_nvhe_dump_backtrace_end();
}

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
			 pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * In protected mode the host cannot read hypervisor memory, so the
 * backtrace is produced at EL2 and written into a per-CPU shared
 * buffer; here we only walk that buffer and print its entries.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *entry =
		(unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int remaining = NVHE_STACKTRACE_SIZE / sizeof(long);

	kvm_nvhe_dump_backtrace_start();
	/* The saved stacktrace is terminated by a null entry */
	while (remaining-- && *entry) {
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, *entry);
		entry++;
	}
	kvm_nvhe_dump_backtrace_end();
}
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dispatches to the EL2-produced shared-buffer dump when protected
 * mode is enabled, otherwise unwinds the HYP stack directly from EL1.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (!is_protected_kvm_enabled())
		hyp_dump_backtrace(hyp_offset);
	else
		pkvm_dump_backtrace(hyp_offset);
}