author     Kalesh Singh <kaleshsingh@google.com>  2022-07-26 00:37:41 -0700
committer  Marc Zyngier <maz@kernel.org>          2022-07-26 10:49:16 +0100
commit     573e1e8275f7167ddd533c6e4e0f500f8be4d974 (patch)
tree       d7d2ffa2d12448f083d2d40ea50bc18596ca3c40 /arch/arm64/include/asm/stacktrace
parent     548ec3336f323db56260b312c232ab37285f0284 (diff)
KVM: arm64: Stub implementation of non-protected nVHE HYP stack unwinder
Add stub implementations of the non-protected nVHE stack unwinder so that the kernel keeps building; they are implemented later in this series.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220726073750.3219117-9-kaleshsingh@google.com
Diffstat (limited to 'arch/arm64/include/asm/stacktrace')
-rw-r--r--   arch/arm64/include/asm/stacktrace/nvhe.h   47
1 file changed, 47 insertions, 0 deletions
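
For orientation, the sketch below shows the consume-loop pattern that the arm64 unwinder core drives over unwind_next(), which is the kind of caller these stubs keep compiling until the real implementation lands later in the series. The struct layout, the unwind()/consume_fn helpers, the placeholder unwind_next() and the main() harness are assumptions made for this user-space illustration only, not code from this series.

/*
 * Illustrative, user-space approximation of the generic unwind loop.
 * Names and layout are assumptions for the sketch, not the kernel's
 * definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct unwind_state {
	unsigned long fp;	/* frame pointer of the current record */
	unsigned long pc;	/* return address of the current record */
};

/* Placeholder: report that no further frame is available. */
static int unwind_next(struct unwind_state *state)
{
	(void)state;
	return -1;
}

typedef bool (*consume_fn)(void *cookie, unsigned long pc);

/*
 * The generic shape of the unwind loop: hand each PC to the consumer
 * until it declines or unwind_next() reports an error.
 */
static void unwind(struct unwind_state *state, consume_fn consume, void *cookie)
{
	while (1) {
		if (!consume(cookie, state->pc))
			break;
		if (unwind_next(state) < 0)
			break;
	}
}

static bool print_entry(void *cookie, unsigned long pc)
{
	(void)cookie;
	printf(" [<%016lx>]\n", pc);
	return true;
}

int main(void)
{
	struct unwind_state state = { .fp = 0, .pc = 0xffff800008001234UL };

	/* With the placeholder unwind_next(), exactly one entry is printed. */
	unwind(&state, print_entry, NULL);
	return 0;
}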
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
new file mode 100644
index 000000000000..1192ae0f80c1
--- /dev/null
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM nVHE hypervisor stack tracing support.
+ *
+ * The unwinder implementation depends on the nVHE mode:
+ *
+ * 1) Non-protected nVHE mode - the host can directly access the
+ * HYP stack pages and unwind the HYP stack in EL1. This saves having
+ * to allocate shared buffers for the host to read the unwound
+ * stacktrace.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __ASM_STACKTRACE_NVHE_H
+#define __ASM_STACKTRACE_NVHE_H
+
+#include <asm/stacktrace/common.h>
+
+static inline bool on_accessible_stack(const struct task_struct *tsk,
+ unsigned long sp, unsigned long size,
+ struct stack_info *info)
+{
+ return false;
+}
+
+#ifndef __KVM_NVHE_HYPERVISOR__
+/*
+ * Conventional (non-protected) nVHE HYP stack unwinder
+ *
+ * In non-protected mode, the unwinding is done from kernel proper context
+ * (by the host in EL1).
+ */
+
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
+{
+ return false;
+}
+
+static inline int notrace unwind_next(struct unwind_state *state)
+{
+ return 0;
+}
+NOKPROBE_SYMBOL(unwind_next);
+
+#endif /* !__KVM_NVHE_HYPERVISOR__ */
+#endif /* __ASM_STACKTRACE_NVHE_H */
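
Later patches in this series replace the stubs above with real checks. As a rough illustration of the range check such an implementation typically performs when the host unwinds the HYP stack from EL1, here is a self-contained sketch; the stack bounds, the stack_info layout and the on_stack() helper are invented for the example and are not this series' final code.

/*
 * Illustrative sketch of the bounds check a non-stub unwinder performs:
 * an access of [sp, sp + size) is only accepted if it lies entirely
 * within one known stack.  All names and limits here are assumptions
 * for the example, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct stack_info {
	unsigned long low;	/* lowest valid address (inclusive) */
	unsigned long high;	/* one past the highest valid address */
};

/* Accept the access only if [sp, sp + size) sits inside [low, high). */
static bool on_stack(unsigned long sp, unsigned long size,
		     unsigned long low, unsigned long high,
		     struct stack_info *info)
{
	if (!low)
		return false;
	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	info->low = low;
	info->high = high;
	return true;
}

/*
 * A made-up "hypervisor stack" range, standing in for the real bounds
 * that the host learns about in later patches of the series.
 */
#define HYP_STACK_LOW	0x4000UL
#define HYP_STACK_HIGH	0x8000UL

static bool on_accessible_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	return on_stack(sp, size, HYP_STACK_LOW, HYP_STACK_HIGH, info);
}

int main(void)
{
	struct stack_info info = { 0 };

	printf("0x5000 ok? %d\n", on_accessible_stack(0x5000, 16, &info)); /* 1 */
	printf("0x9000 ok? %d\n", on_accessible_stack(0x9000, 16, &info)); /* 0 */
	return 0;
}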