Diffstat (limited to 'arch/mips/kernel/ftrace.c')
 arch/mips/kernel/ftrace.c | 173 +++++++++++++++++++++++++++---------------
 1 file changed, 113 insertions(+), 60 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index dba90ec0dc38..b15615b28569 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Code for replacing ftrace calls with jumps.
*
@@ -11,11 +12,14 @@
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
+#include <linux/syscalls.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
+#include <asm/syscall.h>
#include <asm/uasm.h>
+#include <asm/unistd.h>
#include <asm-generic/sections.h>
@@ -33,24 +37,6 @@ void arch_ftrace_update_code(int command)
ftrace_modify_all_code(command);
}
-#endif
-
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
- if (ip >= (unsigned long)_stext &&
- ip <= (unsigned long)_etext)
- return 1;
- return 0;
-}
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)
@@ -60,7 +46,7 @@ static inline int in_kernel_space(unsigned long ip)
((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
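For illustration (editorial addition, not part of the patch), a standalone sketch of the jal encoding defined above, computed for hypothetical addresses; only JAL, ADDR_MASK and JUMP_RANGE_MASK come from the file:

#include <assert.h>
#include <stdio.h>

#define JAL		0x0c000000
#define ADDR_MASK	0x03ffffff
#define JUMP_RANGE_MASK	((1UL << 28) - 1)

/* jal keeps bits 27..2 of the target; bits 31..28 are inherited from
 * the delay-slot PC, so caller and target must share one 256 MB segment.
 */
static unsigned int jal_encode(unsigned long target)
{
	return JAL | ((unsigned int)(target >> 2) & ADDR_MASK);
}

int main(void)
{
	unsigned long ip = 0x80100000UL;	/* hypothetical call site */
	unsigned long target = 0x80123458UL;	/* hypothetical _mcount */

	/* same-segment check, which is what JUMP_RANGE_MASK expresses */
	assert((ip & ~JUMP_RANGE_MASK) == (target & ~JUMP_RANGE_MASK));
	printf("0x%08x\n", jal_encode(target));	/* prints 0x0c048d16 */
	return 0;
}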
static unsigned int insn_jal_ftrace_caller __read_mostly;
-static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
static inline void ftrace_dyn_arch_init_insns(void)
@@ -68,10 +54,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
u32 *buf;
unsigned int v1;
- /* lui v1, hi16_mcount */
- v1 = 3;
- buf = (u32 *)&insn_lui_v1_hi16_mcount;
- UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+ /*
+  * If MCOUNT_ADDR is not in compat space, the number of generated
+  * instructions would exceed the expected maximum of 2 and overflow
+  * the buffer, so we do not generate them at all. insn_la_mcount
+  * will then not be used later in ftrace_make_call().
+  */
+ if (uasm_in_compat_space_p(MCOUNT_ADDR)) {
+ /* la v1, _mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_la_mcount[0];
+ UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+ } else {
+ pr_warn("ftrace: mcount address beyond 32 bits is not supported (%lX)\n",
+ MCOUNT_ADDR);
+ }
/* jal (ftrace_caller + 8), jump over the first two instructions */
buf = (u32 *)&insn_jal_ftrace_caller;
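As a side note (editorial; the helper name is invented), the two words a compat-space "la" expands to can be derived by splitting the address around the sign-extending addiu, roughly:

#include <stdio.h>

/* Split a 32-bit address into the "lui hi / addiu lo" pair used by a
 * two-word "la". addiu sign-extends its 16-bit immediate, so the
 * high half must absorb a bias when bit 15 of the low half is set.
 */
static void la_split(unsigned int addr, unsigned short *hi, short *lo)
{
	*lo = (short)(addr & 0xffff);
	*hi = (unsigned short)((addr - (int)*lo) >> 16);
}

int main(void)
{
	unsigned short hi;
	short lo;

	la_split(0x8012b458u, &hi, &lo);
	/* prints "lui 0x8013, addiu -19368":
	 * 0x80130000 + (-19368) == 0x8012b458
	 */
	printf("lui 0x%04x, addiu %d\n", hi, lo);
	return 0;
}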
@@ -108,11 +104,35 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
+
ip += 4;
safe_store_code(new_code2, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
- flush_icache_range(ip, ip + 8); /* original ip + 12 */
+
+ ip -= 4;
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+
+static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip -= 4;
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ flush_icache_range(ip, ip + 8);
+
return 0;
}
#endif
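An editorial note on the two helpers above (inferred from their call sites, not stated in the patch):

/*
 * The helpers differ only in store order.  ftrace_modify_code_2()
 * writes the word at ip first and is used when a call site is
 * disabled; ftrace_modify_code_2r() writes the word at ip + 4 first
 * and is used by ftrace_make_call(), so the low half of the mcount
 * address is already in place by the time the "la" high-half word
 * at ip goes live.  Both flush the icache over the full 8 bytes.
 */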
@@ -124,13 +144,14 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
*
* move at, ra
* jal _mcount --> nop
+ * sub sp, sp, 8 --> nop (CONFIG_32BIT)
*
* 2. For modules:
*
* 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* move $12, ra_address
* jalr v1
@@ -139,7 +160,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
* 2.2 For the Other situations
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* jalr v1
* nop | move $12, ra_address | sub sp, sp, 8
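The 0x10000005 / 0x10000004 words quoted above follow from the branch encoding; a hedged sketch (helper name invented):

/* "b 1f" is "beq $0, $0, off": opcode 0b000100 in bits 31..26, both
 * register fields zero, and a signed word offset counted from the
 * delay slot, i.e. target = branch + 4 + 4 * off.  So 0x10000005
 * lands 24 bytes past the branch (case 2.1 above) and 0x10000004
 * lands 20 bytes past it (case 2.2).
 */
static unsigned int insn_b_forward(unsigned int off)
{
	return 0x10000000 | (off & 0xffff);
}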
@@ -158,7 +179,7 @@ int ftrace_make_nop(struct module *mod,
* If ip is in kernel text, no long call is needed; otherwise, a
* long call is.
*/
- new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+ new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
@@ -178,10 +199,21 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned int new;
unsigned long ip = rec->ip;
- new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
- insn_lui_v1_hi16_mcount;
+ /*
+  * When the code to patch is not in kernel text, we must use
+  * insn_la_mcount. However, if MCOUNT_ADDR is not in compat space,
+  * insn_la_mcount was never generated and is not usable.
+  */
+ if (!core_kernel_text(ip) && !uasm_in_compat_space_p(MCOUNT_ADDR))
+ return -EFAULT;
+
+ new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
+#else
+ return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
+ INSN_NOP : insn_la_mcount[1]);
+#endif
}
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
@@ -195,7 +227,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ftrace_modify_code(FTRACE_CALL_IP, new);
}
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
{
/* Encode the instructions when booting */
ftrace_dyn_arch_init_insns();
@@ -203,9 +235,6 @@ int __init ftrace_dyn_arch_init(void *data)
/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
- /* The return code is retured via data */
- *(unsigned long *)data = 0;
-
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
@@ -236,7 +265,7 @@ int ftrace_disable_ftrace_graph_caller(void)
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
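To make the masks above concrete, a hedged decode of one matching word (editorial example):

/* 0xafbf0004 is "sw ra, 4(sp)":
 *	bits 31..26  0b101011  sw  (sd is 0b111111, hence "s{d,w}")
 *	bits 25..21  29        base register sp
 *	bits 20..16  31        source register ra
 *	bits 15..0   0x0004    stack offset, extracted via OFFSET_MASK
 */
static int is_sp_store(unsigned int ins)	/* illustrative helper */
{
	return (ins & S_R_SP) == S_R_SP;
}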
-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
-	old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra,
+	unsigned long old_parent_ra, unsigned long parent_ra_addr,
+	unsigned long fp)
{
unsigned long sp, ip, tmp;
@@ -248,7 +277,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
* instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
* kernel, move after the instruction "move ra, at"(offset is 16)
*/
- ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+ ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
/*
* search the text until finding the non-store instruction or "s{d,w}
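A worked layout (editorial; the addresses and frame size are hypothetical, the offsets follow the comment above) for a non-leaf kernel function on 32-bit:

/*
 *	func+0x00:  addiu sp, sp, -32
 *	func+0x04:  sw    ra, 28(sp)	<- the "s{d,w} ra, offset(sp)"
 *	func+0x08:  sw    s8, 24(sp)
 *	func+0x0c:  move  at, ra
 *	func+0x10:  jal   _mcount
 *	func+0x14:  (delay slot)
 *	func+0x18:  ...			<- self_ra (jal sets ra = pc + 8)
 *
 * self_ra - 16 == func+0x08, the word just above "move at, ra"; the
 * scan then walks backwards over stores until it finds the ra save,
 * giving sp + 28 as the real parent-ra slot.  Module long-call sites
 * carry two extra words (lui/addiu), hence the offset of 24.
 */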
@@ -294,29 +323,32 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
unsigned long fp)
{
unsigned long old_parent_ra;
- struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
int faulted, insns;
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
/*
- * "parent_ra_addr" is the stack address saved the return address of
- * the caller of _mcount.
+ * "parent_ra_addr" is the stack address where the return address of
+ * the caller of _mcount is saved.
*
- * if the gcc < 4.5, a leaf function does not save the return address
- * in the stack address, so, we "emulate" one in _mcount's stack space,
- * and hijack it directly, but for a non-leaf function, it save the
- * return address to the its own stack space, we can not hijack it
- * directly, but need to find the real stack address,
- * ftrace_get_parent_addr() does it!
+ * If gcc < 4.5, a leaf function does not save the return address
+ * in the stack address, so we "emulate" one in _mcount's stack space,
+ * and hijack it directly.
+ * For a non-leaf function, it does save the return address to its own
+ * stack space, so we can not hijack it directly, but need to find the
+ * real stack address, which is done by ftrace_get_parent_addr().
*
- * if gcc>= 4.5, with the new -mmcount-ra-address option, for a
+ * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
* non-leaf function, the location of the return address will be saved
- * to $12 for us, and for a leaf function, only put a zero into $12. we
- * do it in ftrace_graph_caller of mcount.S.
+ * to $12 for us.
+ * For a leaf function, it just puts a zero into $12, so we handle
+ * it in ftrace_graph_caller() of mcount.S.
*/
/* old_parent_ra = *parent_ra_addr; */
@@ -330,7 +362,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
* If fails when getting the stack address of the non-leaf function's
* ra, stop function graph tracer and return
*/
- if (parent_ra_addr == 0)
+ if (parent_ra_addr == NULL)
goto out;
#endif
/* *parent_ra_addr = return_hooker; */
@@ -338,29 +370,50 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
if (unlikely(faulted))
goto out;
- if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
- == -EBUSY) {
- *parent_ra_addr = old_parent_ra;
- return;
- }
-
/*
* Get the recorded ip of the current mcount calling site in the
* __mcount_loc section, which will be used to filter the function
* entries configured through the tracing/set_graph_function interface.
*/
- insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
- trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
+ insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ self_ra -= (MCOUNT_INSN_SIZE * insns);
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
*parent_ra_addr = old_parent_ra;
- }
return;
out:
ftrace_graph_stop();
WARN_ON(1);
}
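A short editorial check of the rewind arithmetic above: in kernel text the call site is "move at, ra; jal _mcount" and self_ra is jal + 8, so self_ra - 2 * MCOUNT_INSN_SIZE points at the jal word itself, which is the address recorded in __mcount_loc; module long-call sites are MCOUNT_OFFSET_INSNS words longer, hence the larger rewind.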
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+ if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+ if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
+ return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+ if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+ return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
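For context (editorial): the MIPS ABI syscall ranges are disjoint (o32 numbers start at 4000, n64 at 5000, n32 at 6000), which is why a single number can select the table above. A standalone sketch; the per-ABI count is invented for the example:

#include <stdio.h>

#define NR_O32_BASE	4000	/* __NR_O32_Linux */
#define NR_64_BASE	5000	/* __NR_64_Linux  */
#define NR_N32_BASE	6000	/* __NR_N32_Linux */
#define NR_PER_ABI	330	/* invented count, for the example only */

static const char *table_for(int nr)
{
	if (nr >= NR_N32_BASE && nr < NR_N32_BASE + NR_PER_ABI)
		return "sysn32_call_table";
	if (nr >= NR_64_BASE && nr < NR_64_BASE + NR_PER_ABI)
		return "sys_call_table";
	if (nr >= NR_O32_BASE && nr < NR_O32_BASE + NR_PER_ABI)
		return "sys32_call_table";
	return "sys_ni_syscall";
}

int main(void)
{
	printf("%s\n", table_for(5003));	/* sys_call_table */
	printf("%s\n", table_for(4020));	/* sys32_call_table */
	printf("%s\n", table_for(9999));	/* sys_ni_syscall */
	return 0;
}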