author		Mark Rutland <mark.rutland@arm.com>	2019-07-02 14:07:29 +0100
committer	Will Deacon <will@kernel.org>	2019-07-22 11:44:15 +0100
commit		592700f094be229b5c9cc1192d5cea46eb4c7afc (patch)
tree		94b98dd3d350970646a4d5d40227daa00d645945 /arch/arm64/kernel/stacktrace.c
parent		f3dcbe67ed424f1cf92065f9ad0cc647f2b44eac (diff)
arm64: stacktrace: Better handle corrupted stacks
The arm64 stacktrace code is careful to only dereference frame records in valid stack ranges, ensuring that a corrupted frame record won't result in a faulting access.

However, it's still possible for corrupt frame records to result in infinite loops in the stacktrace code, which is also undesirable.

This patch ensures that we complete a stacktrace in finite time, by keeping track of which stacks we have already completed unwinding, and verifying that if the next frame record is on the same stack, it is at a higher address.

As this has turned out to be particularly subtle, comments are added to explain the procedure.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Tested-by: James Morse <james.morse@arm.com>
Acked-by: Dave Martin <Dave.Martin@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Tengfei Fan <tengfeif@codeaurora.org>
Signed-off-by: Will Deacon <will@kernel.org>
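To make the termination argument concrete, below is a minimal, self-contained C sketch of the same idea, not the kernel code itself: the stack types, the fake_stack array, and the simplified on_accessible_stack() are stand-ins invented for illustration, a plain bool array replaces the kernel's stacks_done bitmap (set_bit()/test_bit()), and plain loads replace READ_ONCE_NOCHECK(). It builds a cyclic chain of frame records and shows the unwind stopping in finite time.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stack types; the kernel knows TASK, IRQ, OVERFLOW and SDEI stacks. */
enum stack_type { STACK_TASK, STACK_IRQ, NR_STACK_TYPES };

struct stackframe {
	unsigned long fp;
	unsigned long pc;
	unsigned long prev_fp;
	enum stack_type prev_type;
	bool stacks_done[NR_STACK_TYPES];	/* the kernel uses a bitmap */
};

/* Stand-in for on_accessible_stack(): a single fake, 16-byte-aligned stack. */
static _Alignas(16) unsigned long fake_stack[8];

static bool on_accessible_stack(unsigned long fp, enum stack_type *type)
{
	unsigned long lo = (unsigned long)fake_stack;
	unsigned long hi = lo + sizeof(fake_stack);

	/* A frame record is two unsigned longs: {fp, lr}. */
	if (fp < lo || fp + 16 > hi)
		return false;
	*type = STACK_TASK;
	return true;
}

static int unwind_frame(struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	enum stack_type type;

	if (fp & 0xf)
		return -1;
	if (!on_accessible_stack(fp, &type))
		return -1;

	/* Never revisit a stack we have already finished unwinding. */
	if (frame->stacks_done[type])
		return -1;

	if (type == frame->prev_type) {
		/* Same stack: records must be at strictly increasing addresses. */
		if (fp <= frame->prev_fp)
			return -1;
	} else {
		/* Stack transition: retire the old stack for good. */
		frame->stacks_done[frame->prev_type] = true;
	}

	frame->fp = ((unsigned long *)fp)[0];
	frame->pc = ((unsigned long *)fp)[1];
	frame->prev_fp = fp;
	frame->prev_type = type;
	return 0;
}

int main(void)
{
	/* prev_fp == 0 lets the first record pass the monotonicity check. */
	struct stackframe frame = { 0 };
	int steps = 0;

	/* Corrupt chain: record B points back at record A, forming a cycle. */
	fake_stack[0] = (unsigned long)&fake_stack[2];	/* A -> B */
	fake_stack[2] = (unsigned long)&fake_stack[0];	/* B -> A */

	frame.fp = (unsigned long)&fake_stack[0];
	while (unwind_frame(&frame) == 0)
		steps++;

	printf("unwind terminated after %d step(s)\n", steps);
	return 0;
}

With the corrupt chain above, the second visit to record A fails the fp <= prev_fp check, so the loop exits after two successful steps rather than spinning forever.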
Diffstat (limited to 'arch/arm64/kernel/stacktrace.c')
-rw-r--r--	arch/arm64/kernel/stacktrace.c	40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 017972c2de90..2b160ae594eb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -29,9 +29,18 @@
  *	ldp	x29, x30, [sp]
  *	add	sp, sp, #0x10
  */
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
 int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
 	unsigned long fp = frame->fp;
+	struct stack_info info;
 
 	if (fp & 0xf)
 		return -EINVAL;
@@ -39,11 +48,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	if (!tsk)
 		tsk = current;
 
-	if (!on_accessible_stack(tsk, fp, NULL))
+	if (!on_accessible_stack(tsk, fp, &info))
 		return -EINVAL;
 
+	if (test_bit(info.type, frame->stacks_done))
+		return -EINVAL;
+
+	/*
+	 * As stacks grow downward, any valid record on the same stack must be
+	 * at a strictly higher address than the prior record.
+	 *
+	 * Stacks can nest in several valid orders, e.g.
+	 *
+	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+	 *
+	 * ... but the nesting itself is strict. Once we transition from one
+	 * stack to another, it's never valid to unwind back to that first
+	 * stack.
+	 */
+	if (info.type == frame->prev_type) {
+		if (fp <= frame->prev_fp)
+			return -EINVAL;
+	} else {
+		set_bit(frame->prev_type, frame->stacks_done);
+	}
+
+	/*
+	 * Record this frame record's values and location. The prev_fp and
+	 * prev_type are only meaningful to the next unwind_frame() invocation.
+	 */
 	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	frame->prev_fp = fp;
+	frame->prev_type = info.type;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&