author      Russell King <rmk+kernel@arm.linux.org.uk>   2010-11-26 10:28:11 +0000
committer   Russell King <rmk+kernel@arm.linux.org.uk>   2010-11-26 10:28:11 +0000
commit      83cf1eecfe9afee99d6b86f963187acd414c019d (patch)
tree        4381bdcc45d81c8c3713630cae9660d770885d96 /arch/arm
parent      f1690d17d2c7afa2a2079e3c91eb2bca8c1c5ecd (diff)
parent      0e341af835fdf553820a1fa98341b93ab32ce466 (diff)
Merge branch 'ftrace' of git://github.com/rabinv/linux-2.6 into devel-stable
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                      |   1
-rw-r--r--  arch/arm/Kconfig.debug                |   2
-rw-r--r--  arch/arm/include/asm/system.h         |   5
-rw-r--r--  arch/arm/include/asm/traps.h          |  23
-rw-r--r--  arch/arm/kernel/Makefile              |   3
-rw-r--r--  arch/arm/kernel/entry-common.S        | 202
-rw-r--r--  arch/arm/kernel/ftrace.c              | 103
-rw-r--r--  arch/arm/kernel/irq.c                 |   4
-rw-r--r--  arch/arm/kernel/smp.c                 |   5
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S         |   1
-rw-r--r--  arch/arm/plat-versatile/sched-clock.c |   1
11 files changed, 273 insertions(+), 77 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c8fa1443e2c1..49778bb43782 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -14,6 +14,7 @@ config ARM
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+ select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 2fd0b99afc4b..eac62085f5b2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -23,7 +23,7 @@ config STRICT_DEVMEM
config FRAME_POINTER
bool
depends on !THUMB2_KERNEL
- default y if !ARM_UNWIND
+ default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
help
If you say N here, the resulting kernel will be slightly smaller and
faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 1120f18a6b17..ec4327a4653d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -63,6 +63,11 @@
#include <asm/outercache.h>
#define __exception __attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry __irq_entry
+#else
+#define __exception_irq_entry __exception
+#endif
struct thread_info;
struct task_struct;
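
For reference, a minimal sketch of how this annotation is meant to be used (the handler name below is made up; the real conversions are in the irq.c and smp.c hunks further down). With the graph tracer enabled the function lands in .irqentry.text, which the tracer and the backtrace code can recognise as interrupt entry; otherwise it stays in .exception.text as before:

	/* hypothetical example, not part of this diff */
	asmlinkage void __exception_irq_entry handle_board_irq(struct pt_regs *regs)
	{
		/* ... dispatch to the generic IRQ layer ... */
	}
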
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 491960bf4260..124475afb007 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -15,13 +15,32 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+ extern char __irqentry_text_start[];
+ extern char __irqentry_text_end[];
+
+ return ptr >= (unsigned long)&__irqentry_text_start &&
+ ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+ return 0;
+}
+#endif
+
static inline int in_exception_text(unsigned long ptr)
{
extern char __exception_text_start[];
extern char __exception_text_end[];
+ int in;
+
+ in = ptr >= (unsigned long)&__exception_text_start &&
+ ptr < (unsigned long)&__exception_text_end;
- return ptr >= (unsigned long)&__exception_text_start &&
- ptr < (unsigned long)&__exception_text_end;
+ return in ? : __in_irqentry_text(ptr);
}
extern void __init early_trap_init(void);
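
A sketch of the consumer side (helper names are illustrative, not taken from this diff): the ARM backtrace code uses in_exception_text() to decide when a frame holds a saved register set worth dumping, and with this change frames inside .irqentry.text are treated the same way:

	/* illustrative only: dump_exception_regs() is a hypothetical helper */
	static void report_frame(unsigned long where, unsigned long frame)
	{
		printk(KERN_INFO "[<%08lx>] (%pS)\n", where, (void *)where);

		/* .irqentry.text now counts as exception text, so IRQ entry
		 * frames keep their exception-stack dump after the handlers
		 * move out of .exception.text */
		if (in_exception_text(where))
			dump_exception_regs(frame);
	}
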
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5b9b268f4fbb..679851a9f589 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -5,7 +5,7 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
-ifdef CONFIG_DYNAMIC_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
endif
@@ -33,6 +33,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
obj-$(CONFIG_ATAGS_PROC) += atags.o
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8bfa98757cd2..aae802ee12f8 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -141,98 +141,170 @@ ENDPROC(ret_from_fork)
#endif
#endif
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(__gnu_mcount_nc)
- mov ip, lr
- ldmia sp!, {lr}
- mov pc, ip
-ENDPROC(__gnu_mcount_nc)
+.macro __mcount suffix
+ mcount_enter
+ ldr r0, =ftrace_trace_function
+ ldr r2, [r0]
+ adr r0, .Lftrace_stub
+ cmp r0, r2
+ bne 1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ldr r1, =ftrace_graph_return
+ ldr r2, [r1]
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+
+ ldr r1, =ftrace_graph_entry
+ ldr r2, [r1]
+ ldr r0, =ftrace_graph_entry_stub
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+#endif
-ENTRY(ftrace_caller)
- stmdb sp!, {r0-r3, lr}
- mov r0, lr
+ mcount_exit
+
+1: mcount_get_lr r1 @ lr of instrumented func
+ mov r0, lr @ instrumented function
+ sub r0, r0, #MCOUNT_INSN_SIZE
+ adr lr, BSYM(2f)
+ mov pc, r2
+2: mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+ mcount_enter
+
+ mcount_get_lr r1 @ lr of instrumented func
+ mov r0, lr @ instrumented function
sub r0, r0, #MCOUNT_INSN_SIZE
- ldr r1, [sp, #20]
- .global ftrace_call
-ftrace_call:
+ .globl ftrace_call\suffix
+ftrace_call\suffix:
bl ftrace_stub
- ldmia sp!, {r0-r3, ip, lr}
- mov pc, ip
-ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+ mov r0, r0
+#endif
+
+ mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+ sub r0, fp, #4 @ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ @ called from __ftrace_caller, saved in mcount_enter
+ ldr r1, [sp, #16] @ instrumented routine (func)
+#else
+ @ called from __mcount, untouched in lr
+ mov r1, lr @ instrumented routine (func)
+#endif
+ sub r1, r1, #MCOUNT_INSN_SIZE
+ mov r2, fp @ frame pointer
+ bl prepare_ftrace_return
+ mcount_exit
+.endm
#ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+ stmdb sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+ ldr \reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+ ldr lr, [fp, #-4]
+ ldmia sp!, {r0-r3, pc}
+.endm
+
ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
stmdb sp!, {lr}
ldr lr, [fp, #-4]
ldmia sp!, {pc}
+#else
+ __mcount _old
+#endif
ENDPROC(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
- stmdb sp!, {r0-r3, lr}
- ldr r1, [fp, #-4]
- mov r0, lr
- sub r0, r0, #MCOUNT_INSN_SIZE
-
- .globl ftrace_call_old
-ftrace_call_old:
- bl ftrace_stub
- ldr lr, [fp, #-4] @ restore lr
- ldmia sp!, {r0-r3, pc}
+ __ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif
-#else
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+ __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
-ENTRY(__gnu_mcount_nc)
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
stmdb sp!, {r0-r3, lr}
- ldr r0, =ftrace_trace_function
- ldr r2, [r0]
- adr r0, .Lftrace_stub
- cmp r0, r2
- bne gnu_trace
+.endm
+
+.macro mcount_get_lr reg
+ ldr \reg, [sp, #20]
+.endm
+
+.macro mcount_exit
ldmia sp!, {r0-r3, ip, lr}
mov pc, ip
+.endm
-gnu_trace:
- ldr r1, [sp, #20] @ lr of instrumented routine
- mov r0, lr
- sub r0, r0, #MCOUNT_INSN_SIZE
- adr lr, BSYM(1f)
- mov pc, r2
-1:
- ldmia sp!, {r0-r3, ip, lr}
+ENTRY(__gnu_mcount_nc)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ mov ip, lr
+ ldmia sp!, {lr}
mov pc, ip
+#else
+ __mcount
+#endif
ENDPROC(__gnu_mcount_nc)
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * This is under an ifdef in order to force link-time errors for people trying
- * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
- * mcount.
- */
-ENTRY(mcount)
- stmdb sp!, {r0-r3, lr}
- ldr r0, =ftrace_trace_function
- ldr r2, [r0]
- adr r0, ftrace_stub
- cmp r0, r2
- bne trace
- ldr lr, [fp, #-4] @ restore lr
- ldmia sp!, {r0-r3, pc}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+ __ftrace_caller
+ENDPROC(ftrace_caller)
+#endif
-trace:
- ldr r1, [fp, #-4] @ lr of instrumented routine
- mov r0, lr
- sub r0, r0, #MCOUNT_INSN_SIZE
- mov lr, pc
- mov pc, r2
- ldr lr, [fp, #-4] @ restore lr
- ldmia sp!, {r0-r3, pc}
-ENDPROC(mcount)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
#endif
-#endif /* CONFIG_DYNAMIC_FTRACE */
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl return_to_handler
+return_to_handler:
+ stmdb sp!, {r0-r3}
+ mov r0, fp @ frame pointer
+ bl ftrace_return_to_handler
+ mov lr, r0 @ r0 has real ret addr
+ ldmia sp!, {r0-r3}
+ mov pc, lr
+#endif
ENTRY(ftrace_stub)
.Lftrace_stub:
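
For context, a sketch (not part of this diff) of the call sites these entry points serve. With a toolchain using the new EABI mcount ABI, gcc -pg emits a push of lr followed by a call to __gnu_mcount_nc; when tracing is off, dynamic ftrace rewrites that call to the "pop {lr}" NOP defined in ftrace.c. With CONFIG_OLD_MCOUNT toolchains the call goes to mcount and the caller's lr sits at [fp, #-4], which is exactly what the two mcount_get_lr variants above encode.

	@ hypothetical instrumented function, new-style mcount ABI (gcc -pg)
	func:
		push	{lr}
		bl	__gnu_mcount_nc		@ patched to "pop {lr}" when tracing is off
		@ ... function body ...
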
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 971ac8c36ea7..c0062ad1e847 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -24,6 +24,7 @@
#define NOP 0xe8bd4000 /* pop {lr} */
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
@@ -59,9 +60,9 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
}
#endif
-/* construct a branch (BL) instruction to addr */
#ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+ bool link)
{
unsigned long s, j1, j2, i1, i2, imm10, imm11;
unsigned long first, second;
@@ -83,15 +84,22 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
j2 = (!i2) ^ s;
first = 0xf000 | (s << 10) | imm10;
- second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+ second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+ if (link)
+ second |= 1 << 14;
return (second << 16) | first;
}
#else
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+ bool link)
{
+ unsigned long opcode = 0xea000000;
long offset;
+ if (link)
+ opcode |= 1 << 24;
+
offset = (long)addr - (long)(pc + 8);
if (unlikely(offset < -33554432 || offset > 33554428)) {
/* Can't generate branches that far (from ARM ARM). Ftrace
@@ -103,10 +111,15 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
offset = (offset >> 2) & 0x00ffffff;
- return 0xeb000000 | offset;
+ return opcode | offset;
}
#endif
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+ return ftrace_gen_branch(pc, addr, true);
+}
+
static int ftrace_modify_code(unsigned long pc, unsigned long old,
unsigned long new)
{
@@ -193,3 +206,83 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long) &return_to_handler;
+ struct ftrace_graph_ent trace;
+ unsigned long old;
+ int err;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ old = *parent;
+ *parent = return_hooker;
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent = old;
+ }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+extern unsigned long ftrace_graph_call_old;
+extern void ftrace_graph_caller_old(void);
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+ void (*func) (void), bool enable)
+{
+ unsigned long caller_fn = (unsigned long) func;
+ unsigned long pc = (unsigned long) callsite;
+ unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+ unsigned long nop = 0xe1a00000; /* mov r0, r0 */
+ unsigned long old = enable ? nop : branch;
+ unsigned long new = enable ? branch : nop;
+
+ return ftrace_modify_code(pc, old, new);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+ int ret;
+
+ ret = __ftrace_modify_caller(&ftrace_graph_call,
+ ftrace_graph_caller,
+ enable);
+
+#ifdef CONFIG_OLD_MCOUNT
+ if (!ret)
+ ret = __ftrace_modify_caller(&ftrace_graph_call_old,
+ ftrace_graph_caller_old,
+ enable);
+#endif
+
+ return ret;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
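
To make the branch encoding above concrete, a worked example with made-up addresses for the non-Thumb2 case:

	/* Worked example (hypothetical addresses) for ftrace_gen_branch():
	 *
	 *   pc   = 0xc0080000   (call site being patched)
	 *   addr = 0xc0010000   (ftrace_caller / ftrace_graph_caller)
	 *
	 *   offset = addr - (pc + 8)            = -0x70008
	 *   field  = (offset >> 2) & 0x00ffffff =  0xfe3ffe
	 *
	 *   link == true  -> 0xeb000000 | 0xfe3ffe = 0xebfe3ffe  (BL addr)
	 *   link == false -> 0xea000000 | 0xfe3ffe = 0xeafe3ffe  (B  addr)
	 *
	 * ftrace_modify_graph_caller() toggles the B form against the
	 * "mov r0, r0" NOP (0xe1a00000) planted at ftrace_graph_call.
	 */
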
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 36ad3be4692a..6d616333340f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
+#include <linux/ftrace.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
@@ -105,7 +106,8 @@ unlock:
* come via this function. Instead, they should provide their
* own 'handler'
*/
-asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8c1959590252..bbca89872c18 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -16,6 +16,7 @@
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
+#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
@@ -457,7 +458,7 @@ static void ipi_timer(void)
}
#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
int cpu = smp_processor_id();
@@ -544,7 +545,7 @@ static void ipi_cpu_stop(unsigned int cpu)
*
* Bit 0 - Inter-processor function call
*/
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index cead8893b46b..897c1a8f1694 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -101,6 +101,7 @@ SECTIONS
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
+ IRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
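
IRQENTRY_TEXT comes from include/asm-generic/vmlinux.lds.h; roughly (abridged from memory of that era, check the generic header in the tree), it collects .irqentry.text and defines the __irqentry_text_start/end symbols referenced by the traps.h hunk above, and expands to nothing when the graph tracer is disabled:

	/* roughly, in include/asm-generic/vmlinux.lds.h (abridged) */
	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	#define IRQENTRY_TEXT					\
			ALIGN_FUNCTION();			\
			__irqentry_text_start = .;		\
			*(.irqentry.text)			\
			__irqentry_text_end = .;
	#else
	#define IRQENTRY_TEXT
	#endif
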
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c
index 9768cf7e83d7..9696ddc238c9 100644
--- a/arch/arm/plat-versatile/sched-clock.c
+++ b/arch/arm/plat-versatile/sched-clock.c
@@ -20,6 +20,7 @@
*/
#include <linux/cnt32_to_63.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <asm/div64.h>
#include <mach/hardware.h>