Diffstat (limited to 'arch/mips/kernel/ftrace.c')
-rw-r--r--  arch/mips/kernel/ftrace.c | 89
1 file changed, 41 insertions(+), 48 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 9d9b8fbae202..b15615b28569 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Code for replacing ftrace calls with jumps.
*
@@ -36,10 +37,6 @@ void arch_ftrace_update_code(int command)
ftrace_modify_all_code(command);
}
-#endif
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)
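With these masks, assembling a jal amounts to dropping the target into the 26-bit address field. A minimal sketch of the encoding (make_jal is an invented name for illustration; the file's own macro for this is not shown in the hunk):

/* jal inherits the top 4 bits of the delay-slot PC, so only the low
 * 28 bits of the target matter, shifted right 2 because instructions
 * are word-aligned */
static unsigned int make_jal(unsigned long addr)
{
	return JAL | ((addr & JUMP_RANGE_MASK) >> 2);
}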
@@ -57,10 +54,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
u32 *buf;
unsigned int v1;
- /* la v1, _mcount */
- v1 = 3;
- buf = (u32 *)&insn_la_mcount[0];
- UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+ /* If we are not in compat space, the number of generated
+ * instructions will exceed the maximum expected limit of 2.
+ * To prevent buffer overflow, we avoid generating them.
+ * insn_la_mcount will not be used later in ftrace_make_call.
+ */
+ if (uasm_in_compat_space_p(MCOUNT_ADDR)) {
+ /* la v1, _mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_la_mcount[0];
+ UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+ } else {
+ pr_warn("ftrace: mcount address beyond 32 bits is not supported (%lX)\n",
+ MCOUNT_ADDR);
+ }
/* jal (ftrace_caller + 8), jump over the first two instructions */
buf = (u32 *)&insn_jal_ftrace_caller;
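uasm_in_compat_space_p() is what decides whether UASM_i_LA can stay within the two-slot insn_la_mcount buffer; as defined in asm/uasm.h it is a sign-extension round-trip test, roughly (in_compat_space is a local rename for illustration):

/* an address is "in compat space" when it survives a round trip
 * through a sign-extended 32-bit int, so "la" expands to just
 * lui + addiu */
static inline int in_compat_space(long addr)
{
	return addr == (int)addr;
}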
@@ -76,7 +83,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
- mm_segment_t old_fs;
/* *(unsigned int *)ip = new_code; */
safe_store_code(new_code, ip, faulted);
@@ -84,10 +90,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
if (unlikely(faulted))
return -EFAULT;
- old_fs = get_fs();
- set_fs(get_ds());
flush_icache_range(ip, ip + 8);
- set_fs(old_fs);
return 0;
}
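With the set_fs()/get_ds() bracket gone (it only widened the address limit around the flush, which flush_icache_range() on kernel addresses does not rely on anymore), the function reads as follows, reconstructed from this hunk:

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}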
@@ -97,7 +100,6 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
- mm_segment_t old_fs;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
@@ -109,10 +111,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
return -EFAULT;
ip -= 4;
- old_fs = get_fs();
- set_fs(get_ds());
flush_icache_range(ip, ip + 8);
- set_fs(old_fs);
return 0;
}
@@ -121,7 +120,6 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
- mm_segment_t old_fs;
ip += 4;
safe_store_code(new_code2, ip, faulted);
@@ -133,10 +131,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
if (unlikely(faulted))
return -EFAULT;
- old_fs = get_fs();
- set_fs(get_ds());
flush_icache_range(ip, ip + 8);
- set_fs(old_fs);
return 0;
}
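The two variants differ only in write order: _2 replaces the word at ip first, while _2r replaces ip + 4 first, presumably so that when a call site is being enabled the jump only becomes live once its companion word is already in place. A condensed ordering sketch (patch_pair is invented; the fault handling and the safe_store_code() wrapper of the real functions are dropped):

static void patch_pair(u32 *site, u32 first, u32 second, bool reverse)
{
	if (reverse) {		/* _2r: companion word lands before the jump */
		site[1] = second;
		site[0] = first;
	} else {		/* _2: jump word is replaced first */
		site[0] = first;
		site[1] = second;
	}
	flush_icache_range((unsigned long)site, (unsigned long)site + 8);
}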
@@ -204,6 +199,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned int new;
unsigned long ip = rec->ip;
+ /* When the code to patch does not belong to the kernel code
+ * space, we must use insn_la_mcount. However, if MCOUNT_ADDR
+ * is not in compat space, insn_la_mcount is not usable.
+ */
+ if (!core_kernel_text(ip) && !uasm_in_compat_space_p(MCOUNT_ADDR))
+ return -EFAULT;
+
new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
#ifdef CONFIG_64BIT
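The new guard pairs with the init-time change above: module text sits outside jal's 256 MB reach, so those sites must use insn_la_mcount, which was only generated when MCOUNT_ADDR fit compat space. A hypothetical predicate (not in the patch) condensing the rule:

static bool mcount_call_patchable(unsigned long ip)
{
	/* kernel text: "jal ftrace_caller" always encodes; module text
	 * needs the la-v1 sequence, i.e. a compat-space MCOUNT_ADDR */
	return core_kernel_text(ip) || uasm_in_compat_space_p(MCOUNT_ADDR);
}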
@@ -263,7 +265,7 @@ int ftrace_disable_ftrace_graph_caller(void)
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
unsigned long sp, ip, tmp;
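For context on the masks: S_R_SP matches any s{d,w} of a register to the stack, and the file's companion mask S_RA_SP (not shown in this hunk) matches the store of ra specifically. The function scans backwards from self_ra classifying prologue words with tests of this shape (helper names invented):

static bool is_stack_store(unsigned int code)
{
	/* opcode, base (sp) and rt sit in the upper 16 bits,
	 * the stack offset in the lower 16 (OFFSET_MASK) */
	return (code & S_R_SP) == S_R_SP;
}

static bool is_ra_store(unsigned int code)
{
	/* the specific store of ra that marks a non-leaf prologue */
	return (code & S_RA_SP) == S_RA_SP;
}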
@@ -321,7 +323,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
unsigned long fp)
{
unsigned long old_parent_ra;
- struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
int faulted, insns;
@@ -333,20 +334,21 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
return;
/*
- * "parent_ra_addr" is the stack address saved the return address of
- * the caller of _mcount.
+ * "parent_ra_addr" is the stack address where the return address of
+ * the caller of _mcount is saved.
*
- * if the gcc < 4.5, a leaf function does not save the return address
- * in the stack address, so, we "emulate" one in _mcount's stack space,
- * and hijack it directly, but for a non-leaf function, it save the
- * return address to the its own stack space, we can not hijack it
- * directly, but need to find the real stack address,
- * ftrace_get_parent_addr() does it!
+ * If gcc < 4.5, a leaf function does not save the return address
+ * on the stack, so we "emulate" one in _mcount's stack space and
+ * hijack it directly.
+ * A non-leaf function does save the return address to its own stack
+ * space, so we cannot hijack it directly; we need to find the real
+ * stack address, which is what ftrace_get_parent_ra_addr() does.
*
- * if gcc>= 4.5, with the new -mmcount-ra-address option, for a
+ * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
* non-leaf function, the location of the return address will be saved
- * to $12 for us, and for a leaf function, only put a zero into $12. we
- * do it in ftrace_graph_caller of mcount.S.
+ * to $12 for us.
+ * For a leaf function, it just puts a zero into $12, so we handle
+ * it in ftrace_graph_caller() of mcount.S.
*/
/* old_parent_ra = *parent_ra_addr; */
@@ -360,7 +362,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
* If getting the stack address of the non-leaf function's ra fails,
* stop the function graph tracer and return
*/
- if (parent_ra_addr == 0)
+ if (parent_ra_addr == NULL)
goto out;
#endif
/* *parent_ra_addr = return_hooker; */
@@ -368,12 +370,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
if (unlikely(faulted))
goto out;
- if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
- NULL) == -EBUSY) {
- *parent_ra_addr = old_parent_ra;
- return;
- }
-
/*
* Get the recorded ip of the current mcount calling site in the
* __mcount_loc section, which will be used to filter the function
@@ -381,13 +377,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
* entries registered into return stack.
*/
insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
- trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
+ self_ra -= (MCOUNT_INSN_SIZE * insns);
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
*parent_ra_addr = old_parent_ra;
- }
return;
out:
ftrace_graph_stop();
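function_graph_enter() is the generic helper from kernel/trace/fgraph.c that this hunk converts to; it folds together what the deleted open-coded sequence did. A sketch of its shape, not the verbatim body:

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	/* push the shadow-stack entry first... */
	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* ...then ask the entry handler; unwind if it declines */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}

A nonzero return tells the arch code to put the original return address back, which is exactly what the new caller does with old_parent_ra above.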
@@ -409,13 +402,13 @@ unsigned long __init arch_syscall_addr(int nr)
unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
- if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
- if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+ if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
- if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif
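The bound fix above assumes the __NR_*_Linux_syscalls macros count table entries, so valid numbers run BASE through BASE + N - 1 and the old inclusive bound read one slot past the table. A hypothetical helper making the boundary explicit:

static bool nr_in_table(int nr, int base, int nsyscalls)
{
	/* one past the end is base + nsyscalls, hence the strict '<' */
	return nr >= base && nr < base + nsyscalls;
}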