author    Song Liu <song@kernel.org>  2025-05-21 12:10:00 +0100
committer Catalin Marinas <catalin.marinas@arm.com>  2025-06-20 13:10:09 +0100
commit    805f13e403cd43bb88790642feef507108af6fc7 (patch)
tree      f7eab2a265d63aedc5b75f309c0bf8dad77aad14 /arch/arm64/kernel/stacktrace.c
parent    beecfd6a88a675e20987e70ec532ba734b230fa4 (diff)
arm64: stacktrace: Implement arch_stack_walk_reliable()
Add arch_stack_walk_reliable(), which will be used during kernel live
patching to detect when threads have completed executing old versions of
functions.

Note that arch_stack_walk_reliable() only needs to guarantee that it
returns an error code when it cannot provide a reliable stacktrace. It is
not required to provide a reliable stacktrace in all scenarios so long as
it returns said error code.

At present we can only reliably unwind up to an exception boundary. In
future we should be able to improve this with additional data from the
compiler (e.g. sframe).

Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20250320171559.3423224-2-song@kernel.org
[ Mark: Simplify logic, clarify commit message ]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Andrea della Porta <andrea.porta@suse.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Song Liu <song@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20250521111000.2237470-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
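As context, the contract described above is what the generic livepatch code
consumes through stack_trace_save_tsk_reliable(), the wrapper in
kernel/stacktrace.c built when HAVE_RELIABLE_STACKTRACE is selected. A
minimal sketch of such a caller follows; check_task_converged() and
MAX_STACK_ENTRIES are hypothetical names for illustration only, not part of
this patch:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

#define MAX_STACK_ENTRIES	64	/* illustrative buffer size */

/* Hypothetical livepatch-style check, single-threaded caller assumed. */
static int check_task_converged(struct task_struct *task)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	int nr_entries;

	/*
	 * Returns the number of saved entries, or a negative error when
	 * the unwind could not be completed reliably (e.g. on arm64 after
	 * this patch, when the walk hits an exception boundary).
	 */
	nr_entries = stack_trace_save_tsk_reliable(task, entries,
						   ARRAY_SIZE(entries));
	if (nr_entries < 0)
		return nr_entries;	/* trace untrusted; retry later */

	/* ... compare each entries[i] against the patched functions ... */
	return 0;
}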
Diffstat (limited to 'arch/arm64/kernel/stacktrace.c')
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 53
1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index f6494c094214..acf682afbd47 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -279,21 +279,24 @@ kunwind_next(struct kunwind_state *state)
typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
-static __always_inline void
+static __always_inline int
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
void *cookie)
{
- if (kunwind_recover_return_address(state))
- return;
+ int ret;
- while (1) {
- int ret;
+ ret = kunwind_recover_return_address(state);
+ if (ret)
+ return ret;
+ while (1) {
if (!consume_state(state, cookie))
- break;
+ return -EINVAL;
ret = kunwind_next(state);
+ if (ret == -ENOENT)
+ return 0;
if (ret < 0)
- break;
+ return ret;
}
}
@@ -326,7 +329,7 @@ do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
: stackinfo_get_unknown(); \
})
-static __always_inline void
+static __always_inline int
kunwind_stack_walk(kunwind_consume_fn consume_state,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
@@ -354,7 +357,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
if (regs) {
if (task != current)
- return;
+ return -EINVAL;
kunwind_init_from_regs(&state, regs);
} else if (task == current) {
kunwind_init_from_caller(&state);
@@ -362,7 +365,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
kunwind_init_from_task(&state, task);
}
- do_kunwind(&state, consume_state, cookie);
+ return do_kunwind(&state, consume_state, cookie);
}
struct kunwind_consume_entry_data {
@@ -389,6 +392,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
+static __always_inline bool
+arch_reliable_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+ /*
+ * At an exception boundary we can reliably consume the saved PC. We do
+ * not know whether the LR was live when the exception was taken, and
+ * so we cannot perform the next unwind step reliably.
+ *
+ * All that matters is whether the *entire* unwind is reliable, so give
+ * up as soon as we hit an exception boundary.
+ */
+ if (state->source == KUNWIND_SOURCE_REGS_PC)
+ return false;
+
+ return arch_kunwind_consume_entry(state, cookie);
+}
+
+noinline noinstr int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie,
+ struct task_struct *task)
+{
+ struct kunwind_consume_entry_data data = {
+ .consume_entry = consume_entry,
+ .cookie = cookie,
+ };
+
+ return kunwind_stack_walk(arch_reliable_kunwind_consume_entry, &data,
+ task, NULL);
+}
+
struct bpf_unwind_consume_entry_data {
bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
void *cookie;
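
To make the new entry point's contract concrete, here is a hedged sketch of
driving arch_stack_walk_reliable() directly via the generic
stack_trace_consume_fn callback type from <linux/stacktrace.h>;
collect_entry(), struct trace_buf and reliable_trace_of() are hypothetical
illustrations, not code from this commit:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Hypothetical cookie for the callback below. */
struct trace_buf {
	unsigned long entries[32];
	unsigned int nr;
};

/*
 * stack_trace_consume_fn: returning false stops the walk early. With the
 * do_kunwind() change above, an early stop makes the reliable walk return
 * -EINVAL, i.e. a truncated trace is reported as unreliable.
 */
static bool collect_entry(void *cookie, unsigned long addr)
{
	struct trace_buf *buf = cookie;

	if (buf->nr >= ARRAY_SIZE(buf->entries))
		return false;
	buf->entries[buf->nr++] = addr;
	return true;
}

/*
 * arch_stack_walk_reliable() returns 0 only if the entire unwind was
 * reliable; on any non-zero return the contents of 'buf' must not be
 * trusted.
 */
static int reliable_trace_of(struct task_struct *task, struct trace_buf *buf)
{
	buf->nr = 0;
	return arch_stack_walk_reliable(collect_entry, buf, task);
}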