author     Linus Torvalds <torvalds@linux-foundation.org>   2021-11-01 20:05:19 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-11-01 20:05:19 -0700
commit     79ef0c00142519bc34e1341447f3797436cc48bf (patch)
tree       200c1ada8b4f31b79ec8a67939ca5fbcd0339958 /arch/arm/kernel
parent     d54f486035fd89f14845a7f34a97a3f5da4e70f2 (diff)
parent     feea69ec121f067073868cebe0cb9d003e64ad80 (diff)
Merge tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:

 - kprobes: Restructured stack unwinder to show properly on x86 when a
   stack dump happens from a kretprobe callback (see the sketch after
   the commit list below).

 - Fix to bootconfig parsing.

 - Have tracefs allow owner and group permissions by default (only
   denying others). There's been pressure to allow non-root access to
   tracefs in a controlled fashion, and using groups is probably the
   safest approach.

 - Bootconfig memory management updates.

 - Bootconfig clean up to have the tools directory be less dependent on
   changes in the kernel tree.

 - Allow perf to be traced by the function tracer.

 - Rewrite of the function graph tracer to be a callback from the
   function tracer instead of having its own trampoline (this change
   will happen on an arch-by-arch basis, and currently only x86_64
   implements it).

 - Allow multiple direct trampolines (bpf hooks to functions) to be
   batched together in one synchronization.

 - Allow histogram triggers to add variables that can perform
   calculations against the event's fields.

 - Use the linker to determine architecture callbacks from the ftrace
   trampoline to allow for proper parameter prototypes and prevent
   warnings from the compiler.

 - Extend histogram triggers to key off of variables.

 - Have trace recursion use bit magic to determine preempt context
   instead of if branches.

 - Have trace recursion disable preemption, as all use cases do anyway.

 - Added testing for verification of tracing utilities.

 - Various small clean ups and fixes.

* tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (101 commits)
  tracing/histogram: Fix semicolon.cocci warnings
  tracing/histogram: Fix documentation inline emphasis warning
  tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
  tracing: Show size of requested perf buffer
  bootconfig: Initialize ret in xbc_parse_tree()
  ftrace: do CPU checking after preemption disabled
  ftrace: disable preemption when recursion locked
  tracing/histogram: Document expression arithmetic and constants
  tracing/histogram: Optimize division by a power of 2
  tracing/histogram: Covert expr to const if both operands are constants
  tracing/histogram: Simplify handling of .sym-offset in expressions
  tracing: Fix operator precedence for hist triggers expression
  tracing: Add division and multiplication support for hist triggers
  tracing: Add support for creating hist trigger variables from literal
  selftests/ftrace: Stop tracing while reading the trace file by default
  MAINTAINERS: Update KPROBES and TRACING entries
  test_kprobes: Move it from kernel/ to lib/
  docs, kprobes: Remove invalid URL and add new reference
  samples/kretprobes: Fix return value if register_kretprobe() failed
  lib/bootconfig: Fix the xbc_get_info kerneldoc
  ...
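As promised above, here is a minimal, hedged sketch of the kind of kretprobe
callback the restructured unwinder is meant to serve: a return handler that
dumps the stack. It is an illustration, not code from this merge; the probe
target (kernel_clone) and the module boilerplate are assumptions, while
register_kretprobe(), struct kretprobe and dump_stack() are the existing
kernel APIs. With the unwinder changes, such a dump shows the real call chain
instead of kretprobe trampoline frames.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative kretprobe module (not part of this merge). */
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/printk.h>

static int demo_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        /*
         * Stack dump from a kretprobe callback; the unwinder now resolves
         * trampoline return addresses back to the real callers.
         */
        dump_stack();
        return 0;
}

static struct kretprobe demo_krp = {
        .handler        = demo_ret_handler,
        .kp.symbol_name = "kernel_clone",       /* assumed probe target */
};

static int __init demo_init(void)
{
        return register_kretprobe(&demo_krp);
}

static void __exit demo_exit(void)
{
        unregister_kretprobe(&demo_krp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");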
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/ftrace.c          |  5
-rw-r--r--  arch/arm/kernel/return_address.c  |  4
-rw-r--r--  arch/arm/kernel/stacktrace.c      | 17
3 files changed, 19 insertions, 7 deletions
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 3c83b5d29697..a006585e1c09 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -193,11 +193,6 @@ int ftrace_make_nop(struct module *mod,
return ret;
}
-
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
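The stub above can go because the series gives the core a weak default; any
architecture whose ftrace_dyn_arch_init() did nothing now simply falls back to
it. A sketch of that fallback, assumed to live in kernel/trace/ftrace.c:

/*
 * Assumed sketch of the generic fallback: architectures with no
 * dynamic-ftrace setup work drop their empty stub and this weak
 * definition is used instead.
 */
int __init __weak ftrace_dyn_arch_init(void)
{
        return 0;
}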
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 7b42ac010fdf..00c11579406c 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -42,6 +42,10 @@ void *return_address(unsigned int level)
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)return_address;
+#ifdef CONFIG_KRETPROBES
+ frame.kr_cur = NULL;
+ frame.tsk = current;
+#endif
walk_stackframe(&frame, save_return_addr, &data);
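The kr_cur/tsk assignments above touch fields that this series adds to ARM's
struct stackframe. A hedged sketch of how the structure is assumed to look in
arch/arm/include/asm/stacktrace.h after the merge; the kretprobe fields only
exist when CONFIG_KRETPROBES=y:

/* Assumed sketch of ARM's struct stackframe after this merge. */
struct stackframe {
        unsigned long fp;               /* frame pointer of the current frame */
        unsigned long sp;               /* stack pointer */
        unsigned long lr;               /* saved link register */
        unsigned long pc;               /* program counter for this frame */
#ifdef CONFIG_KRETPROBES
        struct llist_node *kr_cur;      /* cursor into the kretprobe instance list */
        struct task_struct *tsk;        /* task whose stack is being unwound */
#endif
};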
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 76ea4178a55c..75e905508f27 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
@@ -54,8 +55,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->sp = frame->fp;
frame->fp = *(unsigned long *)(fp);
- frame->pc = frame->lr;
- frame->lr = *(unsigned long *)(fp + 4);
+ frame->pc = *(unsigned long *)(fp + 4);
#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
@@ -66,6 +66,11 @@ int notrace unwind_frame(struct stackframe *frame)
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
#endif
+#ifdef CONFIG_KRETPROBES
+ if (is_kretprobe_trampoline(frame->pc))
+ frame->pc = kretprobe_find_ret_addr(frame->tsk,
+ (void *)frame->fp, &frame->kr_cur);
+#endif
return 0;
}
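This hunk is the heart of the fix: when the unwound return address turns out
to be the kretprobe trampoline, kretprobe_find_ret_addr() returns the address
the trampoline displaced, and kr_cur remembers how far into the task's
kretprobe instance list the walk has progressed, so nested probes resolve in
order. The same pattern as a hypothetical caller-side sketch; demo_frame,
unwind_one_frame() and record_frame() are made-up names for illustration,
while is_kretprobe_trampoline() and kretprobe_find_ret_addr() are the real
helpers used above:

#include <linux/kprobes.h>
#include <linux/sched.h>

/* Made-up frame type and helpers, for illustration only. */
struct demo_frame { unsigned long fp, pc; };
int unwind_one_frame(struct demo_frame *frame);         /* hypothetical */
void record_frame(unsigned long pc);                    /* hypothetical */

void demo_walk(struct demo_frame *frame)
{
        struct llist_node *kr_cur = NULL;       /* cursor, starts at the list head */
        unsigned long pc;

        while (unwind_one_frame(frame) == 0) {
                pc = frame->pc;
                if (is_kretprobe_trampoline(pc))
                        /* Translate the trampoline back to the real caller. */
                        pc = kretprobe_find_ret_addr(current, (void *)frame->fp,
                                                     &kr_cur);
                record_frame(pc);
        }
}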
@@ -157,6 +162,10 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)__save_stack_trace;
}
+#ifdef CONFIG_KRETPROBES
+ frame.kr_cur = NULL;
+ frame.tsk = tsk;
+#endif
walk_stackframe(&frame, save_trace, &data);
}
@@ -174,6 +183,10 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
frame.sp = regs->ARM_sp;
frame.lr = regs->ARM_lr;
frame.pc = regs->ARM_pc;
+#ifdef CONFIG_KRETPROBES
+ frame.kr_cur = NULL;
+ frame.tsk = current;
+#endif
walk_stackframe(&frame, save_trace, &data);
}
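Finally, a usage sketch of the path these last two hunks feed: on 32-bit ARM
the generic stacktrace API ends up in __save_stack_trace() and
save_stack_trace_regs(), so a trace captured from a kretprobe handler now
reports real return addresses. stack_trace_save() and stack_trace_print() are
the existing generic helpers from <linux/stacktrace.h>; the handler itself is
illustrative and would be registered like the earlier sketch.

#include <linux/kprobes.h>
#include <linux/stacktrace.h>

#define DEMO_DEPTH 16

/*
 * Illustrative return handler: capture and print a stack trace through
 * the generic API, which lands in the ARM code patched above.
 */
static int demo_trace_handler(struct kretprobe_instance *ri,
                              struct pt_regs *regs)
{
        unsigned long entries[DEMO_DEPTH];
        unsigned int nr;

        nr = stack_trace_save(entries, DEMO_DEPTH, 0);
        stack_trace_print(entries, nr, 0);      /* trampoline frames now resolve */
        return 0;
}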