author     Chengming Zhou <zhouchengming@bytedance.com>    2022-04-21 00:00:06 +0800
committer  Catalin Marinas <catalin.marinas@arm.com>       2022-04-29 19:21:12 +0100
commit     c4a0ebf87cebbfa28d56e7d93b2536e2311e30c9 (patch)
tree       4bac1e78233bb63eebbc32430d88feac9326e2c7 /arch/arm64/kernel/entry-ftrace.S
parent     e999995c84c3abb6dcae83f8c35942a8d4ee0451 (diff)
arm64/ftrace: Make function graph use ftrace directly
As with commit 0c0593b45c9b ("x86/ftrace: Make function graph use ftrace directly"), we don't need a special hook for the graph tracer; instead we use the graph_ops->func callback to install return_hooker.

Since commit 3b23e4991fb6 ("arm64: implement ftrace with regs") added an implementation of FTRACE_WITH_REGS on arm64, we can easily adopt the same cleanup there.

This cleanup only changes the FTRACE_WITH_REGS implementation, so the mcount-based implementation is unaffected. While in theory it would be possible to make a similar cleanup for !FTRACE_WITH_REGS, that would require rework of the core code, so for now we only change the FTRACE_WITH_REGS implementation.

Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Link: https://lore.kernel.org/r/20220420160006.17880-2-zhouchengming@bytedance.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
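[Note: with the graph_ops->func approach, the graph tracer's entry hook becomes a plain C callback invoked through the regular ftrace-with-regs trampoline, rather than a patched-in "b ftrace_graph_caller" branch. The C-side counterpart of this patch lives in arch/arm64/kernel/ftrace.c and is not part of this file's diff; the sketch below is illustrative only, and the pt_regs field accesses (regs[30] for LR, regs[29] for FP) are assumptions rather than a copy of the upstream code.]

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Sketch: called via graph_ops->func for each traced function when
 * DYNAMIC_FTRACE_WITH_REGS is in use. It installs return_hooker by
 * rewriting the callsite's saved LR, doing in C what the removed
 * ftrace_graph_caller assembly used to do.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = arch_ftrace_get_regs(fregs);

	/* regs->regs[30] holds the callsite's LR, regs->regs[29] its FP */
	prepare_ftrace_return(ip, (unsigned long *)&regs->regs[30],
			      regs->regs[29]);
}
#endif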
Diffstat (limited to 'arch/arm64/kernel/entry-ftrace.S')
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 17 -
1 file changed, 0 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e535480a4069..d42a205ef625 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -97,12 +97,6 @@ SYM_CODE_START(ftrace_common)
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
- nop // If enabled, this will be replaced
- // "b ftrace_graph_caller"
-#endif
-
/*
* At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
* x19-x29 per the AAPCS, and we created frame records upon entry, so we need
@@ -127,17 +121,6 @@ ftrace_common_return:
ret x9
SYM_CODE_END(ftrace_common)
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_CODE_START(ftrace_graph_caller)
- ldr x0, [sp, #S_PC]
- sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
- add x1, sp, #S_LR // parent_ip (callsite's LR)
- ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
- bl prepare_ftrace_return
- b ftrace_common_return
-SYM_CODE_END(ftrace_graph_caller)
-#endif
-
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
/*