path: root/arch/csky/kernel
Diffstat (limited to 'arch/csky/kernel')
-rw-r--r--  arch/csky/kernel/Makefile                     |  10
-rw-r--r--  arch/csky/kernel/asm-offsets.c                |   6
-rw-r--r--  arch/csky/kernel/atomic.S                     |  32
-rw-r--r--  arch/csky/kernel/dumpstack.c                  |  49
-rw-r--r--  arch/csky/kernel/entry.S                      | 255
-rw-r--r--  arch/csky/kernel/ftrace.c                     |  50
-rw-r--r--  arch/csky/kernel/head.S                       |  11
-rw-r--r--  arch/csky/kernel/io.c                         |  91
-rw-r--r--  arch/csky/kernel/irq.c                        |   5
-rw-r--r--  arch/csky/kernel/jump_label.c                 |  54
-rw-r--r--  arch/csky/kernel/module.c                     |   5
-rw-r--r--  arch/csky/kernel/perf_callchain.c             |  21
-rw-r--r--  arch/csky/kernel/perf_event.c                 |   4
-rw-r--r--  arch/csky/kernel/perf_regs.c                  |   3
-rw-r--r--  arch/csky/kernel/power.c                      |   6
-rw-r--r--  arch/csky/kernel/probes/Makefile              |   7
-rw-r--r--  arch/csky/kernel/probes/decode-insn.c         |  49
-rw-r--r--  arch/csky/kernel/probes/decode-insn.h         |  20
-rw-r--r--  arch/csky/kernel/probes/ftrace.c              |  67
-rw-r--r--  arch/csky/kernel/probes/kprobes.c             | 412
-rw-r--r--  arch/csky/kernel/probes/kprobes_trampoline.S  |  19
-rw-r--r--  arch/csky/kernel/probes/simulate-insn.c       | 390
-rw-r--r--  arch/csky/kernel/probes/simulate-insn.h       |  49
-rw-r--r--  arch/csky/kernel/probes/uprobes.c             | 158
-rw-r--r--  arch/csky/kernel/process.c                    |  73
-rw-r--r--  arch/csky/kernel/ptrace.c                     | 303
-rw-r--r--  arch/csky/kernel/setup.c                      | 117
-rw-r--r--  arch/csky/kernel/signal.c                     |  25
-rw-r--r--  arch/csky/kernel/smp.c                        | 100
-rw-r--r--  arch/csky/kernel/stacktrace.c                 | 173
-rw-r--r--  arch/csky/kernel/time.c                       |   2
-rw-r--r--  arch/csky/kernel/traps.c                      | 215
-rw-r--r--  arch/csky/kernel/vdso.c                       | 121
-rw-r--r--  arch/csky/kernel/vdso/.gitignore              |   4
-rw-r--r--  arch/csky/kernel/vdso/Makefile                |  60
-rw-r--r--  arch/csky/kernel/vdso/note.S                  |  12
-rw-r--r--  arch/csky/kernel/vdso/rt_sigreturn.S          |  14
-rwxr-xr-x  arch/csky/kernel/vdso/so2s.sh                 |   5
-rw-r--r--  arch/csky/kernel/vdso/vdso.S                  |  16
-rw-r--r--  arch/csky/kernel/vdso/vdso.lds.S              |  58
-rw-r--r--  arch/csky/kernel/vdso/vgettimeofday.c         |  30
-rw-r--r--  arch/csky/kernel/vmlinux.lds.S                |  68
42 files changed, 2507 insertions(+), 662 deletions(-)
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index 071d659f37b7..8a868316b912 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-extra-y := head.o vmlinux.lds
+extra-y := vmlinux.lds
-obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
-obj-y += power.o syscall.o syscall_table.o setup.o
-obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
+obj-y += head.o entry.o atomic.o signal.o traps.o irq.o time.o vdso.o vdso/
+obj-y += power.o syscall.o syscall_table.o setup.o io.o
+obj-y += process.o cpu-probe.o ptrace.o stacktrace.o
+obj-y += probes/
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
index 9b48b1b1a61b..d1e903579473 100644
--- a/arch/csky/kernel/asm-offsets.c
+++ b/arch/csky/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
int main(void)
{
/* offsets into the task struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
@@ -18,8 +17,7 @@ int main(void)
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
/* offsets into the thread struct */
- DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
- DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp));
DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr));
DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr));
@@ -27,7 +25,6 @@ int main(void)
/* offsets into the thread_info struct */
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TINFO_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value));
DEFINE(TINFO_TASK, offsetof(struct thread_info, task));
@@ -72,6 +69,7 @@ int main(void)
DEFINE(PT_RLO, offsetof(struct pt_regs, rlo));
#endif
DEFINE(PT_USP, offsetof(struct pt_regs, usp));
+ DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs));
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,
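
The PT_FRAME_SIZE constant added above reaches assembly through the usual
Kbuild asm-offsets mechanism. As a rough sketch (the DEFINE macro is the
one from include/linux/kbuild.h; the generated value shown is illustrative,
not the real struct layout):

    /* DEFINE() emits an .ascii marker whose "i" operand the compiler
     * folds to a constant; Kbuild's sed script scrapes these markers
     * into include/generated/asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    /* DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs)) thus becomes
     * something like:
     *   #define PT_FRAME_SIZE 152   (value illustrative)
     * which entry.S can then use via <asm/asm-offsets.h>. */
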
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
index 5b84f11485ae..e73e548f7855 100644
--- a/arch/csky/kernel/atomic.S
+++ b/arch/csky/kernel/atomic.S
@@ -14,13 +14,19 @@
*/
ENTRY(csky_cmpxchg)
USPTOKSP
+
+ RD_MEH a3
+ WR_MEH a3
+
mfcr a3, epc
addi a3, TRAP0_SIZE
- subi sp, 8
+ subi sp, 16
stw a3, (sp, 0)
mfcr a3, epsr
stw a3, (sp, 4)
+ mfcr a3, usp
+ stw a3, (sp, 8)
psrset ee
#ifdef CONFIG_CPU_HAS_LDSTEX
@@ -34,11 +40,11 @@ ENTRY(csky_cmpxchg)
2:
sync.is
#else
-1:
+GLOBAL(csky_cmpxchg_ldw)
ldw a3, (a2)
cmpne a0, a3
bt16 3f
-2:
+GLOBAL(csky_cmpxchg_stw)
stw a1, (a2)
3:
#endif
@@ -47,23 +53,9 @@ ENTRY(csky_cmpxchg)
mtcr a3, epc
ldw a3, (sp, 4)
mtcr a3, epsr
- addi sp, 8
+ ldw a3, (sp, 8)
+ mtcr a3, usp
+ addi sp, 16
KSPTOUSP
rte
END(csky_cmpxchg)
-
-#ifndef CONFIG_CPU_HAS_LDSTEX
-/*
- * Called from tlbmodified exception
- */
-ENTRY(csky_cmpxchg_fixup)
- mfcr a0, epc
- lrw a1, 2b
- cmpne a1, a0
- bt 1f
- subi a1, (2b - 1b)
- stw a1, (sp, LSAVE_PC)
-1:
- rts
-END(csky_cmpxchg_fixup)
-#endif
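
A quick sketch of what this trap implements on cores without the ll/sc
LDSTEX extension: userspace issues trap 0 with the expected value, the new
value, and the target address in a0/a1/a2, and the kernel performs the
compare-and-swap on its behalf. Conceptually (epc/epsr/usp bookkeeping
omitted, return convention not shown):

    /* a sketch of the non-LDSTEX path above, not the exact ABI */
    static inline void csky_cmpxchg_effect(unsigned long old,  /* a0 */
                                           unsigned long new,  /* a1 */
                                           unsigned long *ptr) /* a2 */
    {
            if (*ptr == old)        /* csky_cmpxchg_ldw + cmpne */
                    *ptr = new;     /* csky_cmpxchg_stw         */
    }

Naming the load and store with GLOBAL(csky_cmpxchg_ldw)/GLOBAL(csky_cmpxchg_stw)
lets the fault path recognize and restart this two-instruction window,
which is presumably why the old csky_cmpxchg_fixup hook for the
tlbmodified exception can be deleted.
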
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
deleted file mode 100644
index d67f9777cfd9..000000000000
--- a/arch/csky/kernel/dumpstack.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-
-#include <linux/ptrace.h>
-
-int kstack_depth_to_print = 48;
-
-void show_trace(unsigned long *stack)
-{
- unsigned long *stack_end;
- unsigned long *stack_start;
- unsigned long *fp;
- unsigned long addr;
-
- addr = (unsigned long) stack & THREAD_MASK;
- stack_start = (unsigned long *) addr;
- stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
- fp = stack;
- pr_info("\nCall Trace:");
-
- while (fp > stack_start && fp < stack_end) {
-#ifdef CONFIG_STACKTRACE
- addr = fp[1];
- fp = (unsigned long *) fp[0];
-#else
- addr = *fp++;
-#endif
- if (__kernel_text_address(addr))
- pr_cont("\n[<%08lx>] %pS", addr, (void *)addr);
- }
- pr_cont("\n");
-}
-
-void show_stack(struct task_struct *task, unsigned long *stack)
-{
- if (!stack) {
- if (task)
- stack = (unsigned long *)thread_saved_fp(task);
- else
-#ifdef CONFIG_STACKTRACE
- asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
-#else
- stack = (unsigned long *)&stack;
-#endif
- }
-
- show_trace(stack);
-}
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index a7a5b67df898..c68cdcc76d60 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -9,153 +9,75 @@
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
-#include <asm/setup.h>
#include <asm/page.h>
#include <asm/thread_info.h>
-#define PTE_INDX_MSK 0xffc
-#define PTE_INDX_SHIFT 10
-#define _PGDIR_SHIFT 22
-
.macro zero_fp
#ifdef CONFIG_STACKTRACE
movi r8, 0
#endif
.endm
-.macro tlbop_begin name, val0, val1, val2
-ENTRY(csky_\name)
- mtcr a3, ss2
- mtcr r6, ss3
- mtcr a2, ss4
-
- RD_PGDR r6
- RD_MEH a3
-#ifdef CONFIG_CPU_HAS_TLBI
- tlbi.vaas a3
- sync.is
-
- btsti a3, 31
- bf 1f
- RD_PGDR_K r6
-1:
-#else
- bgeni a2, 31
- WR_MCIR a2
- bgeni a2, 25
- WR_MCIR a2
+.macro context_tracking
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ mfcr a0, epsr
+ btsti a0, 31
+ bt 1f
+ jbsr user_exit_callable
+ ldw a0, (sp, LSAVE_A0)
+ ldw a1, (sp, LSAVE_A1)
+ ldw a2, (sp, LSAVE_A2)
+ ldw a3, (sp, LSAVE_A3)
+#if defined(__CSKYABIV1__)
+ ldw r6, (sp, LSAVE_A4)
+ ldw r7, (sp, LSAVE_A5)
#endif
- bclri r6, 0
- lrw a2, va_pa_offset
- ld.w a2, (a2, 0)
- subu r6, a2
- bseti r6, 31
-
- mov a2, a3
- lsri a2, _PGDIR_SHIFT
- lsli a2, 2
- addu r6, a2
- ldw r6, (r6)
-
- lrw a2, va_pa_offset
- ld.w a2, (a2, 0)
- subu r6, a2
- bseti r6, 31
-
- lsri a3, PTE_INDX_SHIFT
- lrw a2, PTE_INDX_MSK
- and a3, a2
- addu r6, a3
- ldw a3, (r6)
-
- movi a2, (_PAGE_PRESENT | \val0)
- and a3, a2
- cmpne a3, a2
- bt \name
-
- /* First read/write the page, just update the flags */
- ldw a3, (r6)
- bgeni a2, PAGE_VALID_BIT
- bseti a2, PAGE_ACCESSED_BIT
- bseti a2, \val1
- bseti a2, \val2
- or a3, a2
- stw a3, (r6)
-
- /* Some cpu tlb-hardrefill bypass the cache */
-#ifdef CONFIG_CPU_NEED_TLBSYNC
- movi a2, 0x22
- bseti a2, 6
- mtcr r6, cr22
- mtcr a2, cr17
- sync
+1:
#endif
+.endm
- mfcr a3, ss2
- mfcr r6, ss3
- mfcr a2, ss4
- rte
-\name:
- mfcr a3, ss2
- mfcr r6, ss3
- mfcr a2, ss4
+.text
+ENTRY(csky_pagefault)
SAVE_ALL 0
-.endm
-.macro tlbop_end is_write
zero_fp
- RD_MEH a2
- psrset ee, ie
+ context_tracking
+ psrset ee
mov a0, sp
- movi a1, \is_write
jbsr do_page_fault
jmpi ret_from_exception
-.endm
-
-.text
-
-tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
-tlbop_end 0
-
-tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
-tlbop_end 1
-
-tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
-#ifndef CONFIG_CPU_HAS_LDSTEX
-jbsr csky_cmpxchg_fixup
-#endif
-tlbop_end 1
ENTRY(csky_systemcall)
SAVE_ALL TRAP0_SIZE
zero_fp
-
+ context_tracking
psrset ee, ie
- lrw r11, __NR_syscalls
- cmphs syscallid, r11 /* Check nr of syscall */
+ lrw r9, __NR_syscalls
+ cmphs syscallid, r9 /* Check nr of syscall */
bt ret_from_exception
- lrw r13, sys_call_table
- ixw r13, syscallid
- ldw r11, (r13)
- cmpnei r11, 0
+ lrw r9, sys_call_table
+ ixw r9, syscallid
+ ldw syscallid, (r9)
+ cmpnei syscallid, 0
bf ret_from_exception
mov r9, sp
bmaski r10, THREAD_SHIFT
andn r9, r10
- ldw r12, (r9, TINFO_FLAGS)
- ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
- cmpnei r12, 0
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_SYSCALL_WORK
+ and r10, r9
+ cmpnei r10, 0
bt csky_syscall_trace
#if defined(__CSKYABIV2__)
subi sp, 8
stw r5, (sp, 0x4)
stw r4, (sp, 0x0)
- jsr r11 /* Do system call */
+ jsr syscallid /* Do system call */
addi sp, 8
#else
- jsr r11
+ jsr syscallid
#endif
stw a0, (sp, LSAVE_A0) /* Save return value */
jmpi ret_from_exception
@@ -163,6 +85,8 @@ ENTRY(csky_systemcall)
csky_syscall_trace:
mov a0, sp /* sp = pt_regs pointer */
jbsr syscall_trace_enter
+ cmpnei a0, 0
+ bt 1f
/* Prepare args before do system call */
ldw a0, (sp, LSAVE_A0)
ldw a1, (sp, LSAVE_A1)
@@ -170,18 +94,20 @@ csky_syscall_trace:
ldw a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
subi sp, 8
- stw r5, (sp, 0x4)
- stw r4, (sp, 0x0)
+ ldw r9, (sp, LSAVE_A4)
+ stw r9, (sp, 0x0)
+ ldw r9, (sp, LSAVE_A5)
+ stw r9, (sp, 0x4)
+ jsr syscallid /* Do system call */
+ addi sp, 8
#else
ldw r6, (sp, LSAVE_A4)
ldw r7, (sp, LSAVE_A5)
-#endif
- jsr r11 /* Do system call */
-#if defined(__CSKYABIV2__)
- addi sp, 8
+ jsr syscallid /* Do system call */
#endif
stw a0, (sp, LSAVE_A0) /* Save return value */
+1:
mov a0, sp /* right now, sp --> pt_regs */
jbsr syscall_trace_exit
br ret_from_exception
@@ -197,18 +123,20 @@ ENTRY(ret_from_fork)
mov r9, sp
bmaski r10, THREAD_SHIFT
andn r9, r10
- ldw r12, (r9, TINFO_FLAGS)
- ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
- cmpnei r12, 0
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_SYSCALL_WORK
+ and r10, r9
+ cmpnei r10, 0
bf ret_from_exception
mov a0, sp /* sp = pt_regs pointer */
jbsr syscall_trace_exit
ret_from_exception:
- ld syscallid, (sp, LSAVE_PSR)
- btsti syscallid, 31
- bt 1f
+ psrclr ie
+ ld r9, (sp, LSAVE_PSR)
+ btsti r9, 31
+ bt 1f
/*
 * Load address of current->thread_info, then get address of task_struct
 * Get need_resched in task_struct
@@ -217,22 +145,46 @@ ret_from_exception:
bmaski r10, THREAD_SHIFT
andn r9, r10
- ldw r12, (r9, TINFO_FLAGS)
- andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
- cmpnei r12, 0
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_WORK_MASK
+ and r10, r9
+ cmpnei r10, 0
bt exit_work
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ jbsr user_enter_callable
+#endif
1:
+#ifdef CONFIG_PREEMPTION
+ mov r9, sp
+ bmaski r10, THREAD_SHIFT
+ andn r9, r10
+
+ ldw r10, (r9, TINFO_PREEMPT)
+ cmpnei r10, 0
+ bt 2f
+ jbsr preempt_schedule_irq /* irq en/disable is done inside */
+2:
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ ld r10, (sp, LSAVE_PSR)
+ btsti r10, 6
+ bf 2f
+ jbsr trace_hardirqs_on
+2:
+#endif
RESTORE_ALL
exit_work:
- lrw syscallid, ret_from_exception
- mov lr, syscallid
+ lrw r9, ret_from_exception
+ mov lr, r9
- btsti r12, TIF_NEED_RESCHED
+ btsti r10, TIF_NEED_RESCHED
bt work_resched
+ psrset ie
mov a0, sp
- mov a1, r12
+ mov a1, r10
jmpi do_notify_resume
work_resched:
@@ -241,6 +193,7 @@ work_resched:
ENTRY(csky_trap)
SAVE_ALL 0
zero_fp
+ context_tracking
psrset ee
mov a0, sp /* Push Stack pointer arg */
jbsr trap_c /* Call C-level trap handler */
@@ -254,6 +207,9 @@ ENTRY(csky_trap)
ENTRY(csky_get_tls)
USPTOKSP
+ RD_MEH a0
+ WR_MEH a0
+
/* increase epc for continue */
mfcr a0, epc
addi a0, TRAP0_SIZE
@@ -275,36 +231,17 @@ ENTRY(csky_get_tls)
ENTRY(csky_irq)
SAVE_ALL 0
zero_fp
+ context_tracking
psrset ee
-#ifdef CONFIG_PREEMPT
- mov r9, sp /* Get current stack pointer */
- bmaski r10, THREAD_SHIFT
- andn r9, r10 /* Get thread_info */
-
- /*
- * Get task_struct->stack.preempt_count for current,
- * and increase 1.
- */
- ldw r12, (r9, TINFO_PREEMPT)
- addi r12, 1
- stw r12, (r9, TINFO_PREEMPT)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ jbsr trace_hardirqs_off
#endif
+
mov a0, sp
- jbsr csky_do_IRQ
+ jbsr generic_handle_arch_irq
-#ifdef CONFIG_PREEMPT
- subi r12, 1
- stw r12, (r9, TINFO_PREEMPT)
- cmpnei r12, 0
- bt 2f
- ldw r12, (r9, TINFO_FLAGS)
- btsti r12, TIF_NEED_RESCHED
- bf 2f
- jbsr preempt_schedule_irq /* irq en/disable is done inside */
-#endif
-2:
jmpi ret_from_exception
/*
@@ -316,11 +253,6 @@ ENTRY(__switch_to)
lrw a3, TASK_THREAD
addu a3, a0
- mfcr a2, psr /* Save PSR value */
- stw a2, (a3, THREAD_SR) /* Save PSR in task struct */
- bclri a2, 6 /* Disable interrupts */
- mtcr a2, psr
-
SAVE_SWITCH_STACK
stw sp, (a3, THREAD_KSP)
@@ -331,12 +263,9 @@ ENTRY(__switch_to)
ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */
- ldw a2, (a3, THREAD_SR) /* Set next PSR */
- mtcr a2, psr
-
#if defined(__CSKYABIV2__)
- addi r7, a1, TASK_THREAD_INFO
- ldw tls, (r7, TINFO_TP_VALUE)
+ addi a3, a1, TASK_THREAD_INFO
+ ldw tls, (a3, TINFO_TP_VALUE)
#endif
RESTORE_SWITCH_STACK
diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c
index 44f4880179b7..50bfcf129078 100644
--- a/arch/csky/kernel/ftrace.c
+++ b/arch/csky/kernel/ftrace.c
@@ -3,6 +3,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -71,7 +72,8 @@ static int ftrace_check_current_nop(unsigned long hook)
uint16_t olds[7];
unsigned long hook_pos = hook - 2;
- if (probe_kernel_read((void *)olds, (void *)hook_pos, sizeof(nops)))
+ if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos,
+ sizeof(nops)))
return -EFAULT;
if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
@@ -96,7 +98,7 @@ static int ftrace_modify_code(unsigned long hook, unsigned long target,
make_jbsr(target, hook, call, nolr);
- ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
+ ret = copy_to_kernel_nofault((void *)hook_pos, enable ? call : nops,
sizeof(nops));
if (ret)
return -EPERM;
@@ -126,14 +128,20 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = ftrace_modify_code((unsigned long)&ftrace_call,
(unsigned long)func, true, true);
+ if (!ret)
+ ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
+ (unsigned long)func, true, true);
return ret;
}
+#endif /* CONFIG_DYNAMIC_FTRACE */
-int __init ftrace_dyn_arch_init(void)
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
{
- return 0;
+ return ftrace_modify_code(rec->ip, addr, true, true);
}
-#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
@@ -190,5 +198,37 @@ int ftrace_disable_ftrace_graph_caller(void)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifdef CONFIG_DYNAMIC_FTRACE
+#ifndef CONFIG_CPU_HAS_ICACHE_INS
+struct ftrace_modify_param {
+ int command;
+ atomic_t cpu_count;
+};
+
+static int __ftrace_modify_code(void *data)
+{
+ struct ftrace_modify_param *param = data;
+
+ if (atomic_inc_return(&param->cpu_count) == 1) {
+ ftrace_modify_all_code(param->command);
+ atomic_inc(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ local_icache_inv_all(NULL);
+ }
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
+
+ stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+}
+#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
/* _mcount is defined in abi's mcount.S */
EXPORT_SYMBOL(_mcount);
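
The rendezvous above exists because cores without CONFIG_CPU_HAS_ICACHE_INS
cannot broadcast icache invalidation, so every CPU is parked in
stop_machine() and invalidates its own icache after the patch. An
illustrative trace with four online CPUs:

    /* CPU0:       atomic_inc_return() -> 1, so it patches the code
     * CPU1..CPU3: inc -> 2..4, then spin while count <= num_online_cpus()
     * CPU0:       ftrace_modify_all_code() done, inc -> 5
     * CPU1..CPU3: see 5 > 4, stop spinning, run local_icache_inv_all()
     */
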
diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S
index 61989f9241c0..7e3e4f15b052 100644
--- a/arch/csky/kernel/head.S
+++ b/arch/csky/kernel/head.S
@@ -21,6 +21,17 @@ END(_start)
ENTRY(_start_smp_secondary)
SETUP_MMU
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ lrw r6, secondary_msa1
+ ld.w r6, (r6, 0)
+ mtcr r6, cr<31, 15>
+#endif
+
+ lrw r6, secondary_pgd
+ ld.w r6, (r6, 0)
+ mtcr r6, cr<28, 15>
+ mtcr r6, cr<29, 15>
+
/* set stack point */
lrw r6, secondary_stack
ld.w r6, (r6, 0)
diff --git a/arch/csky/kernel/io.c b/arch/csky/kernel/io.c
new file mode 100644
index 000000000000..5883f13fa2b1
--- /dev/null
+++ b/arch/csky/kernel/io.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)from, 4)) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 4) {
+ *(u32 *)to = __raw_readl(from);
+ from += 4;
+ to += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)to, 4)) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 4) {
+ __raw_writel(*(u32 *)from, to);
+ from += 4;
+ to += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ */
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+ u32 qc = (u8)c;
+
+ qc |= qc << 8;
+ qc |= qc << 16;
+
+ while (count && !IS_ALIGNED((unsigned long)dst, 4)) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+
+ while (count >= 4) {
+ __raw_writel(qc, dst);
+ dst += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memset_io);
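
These three routines back the generic memcpy_fromio()/memcpy_toio()/
memset_io() wrappers, handling unaligned head and tail bytes byte-wise and
moving whole words in between. A hedged usage sketch from a driver (the
physical address and sizes are made up for illustration):

    void __iomem *regs = ioremap(0xffc02000, 0x100); /* hypothetical device */
    u8 buf[64];

    if (regs) {
            memcpy_fromio(buf, regs, sizeof(buf)); /* word copies once aligned */
            memset_io(regs, 0, 0x100);
            iounmap(regs);
    }
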
diff --git a/arch/csky/kernel/irq.c b/arch/csky/kernel/irq.c
index 03a1930f1cbb..fcdaf3156286 100644
--- a/arch/csky/kernel/irq.c
+++ b/arch/csky/kernel/irq.c
@@ -15,8 +15,3 @@ void __init init_IRQ(void)
setup_smp_ipi();
#endif
}
-
-asmlinkage void __irq_entry csky_do_IRQ(struct pt_regs *regs)
-{
- handle_arch_irq(regs);
-}
diff --git a/arch/csky/kernel/jump_label.c b/arch/csky/kernel/jump_label.c
new file mode 100644
index 000000000000..d0e8b21447e1
--- /dev/null
+++ b/arch/csky/kernel/jump_label.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#define NOP32_HI 0xc400
+#define NOP32_LO 0x4820
+#define BSR_LINK 0xe000
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ unsigned long addr = jump_entry_code(entry);
+ u16 insn[2];
+ int ret = 0;
+
+ if (type == JUMP_LABEL_JMP) {
+ long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+ if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864))
+ return;
+
+ offset = offset >> 1;
+
+ insn[0] = BSR_LINK |
+ ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+ insn[1] = (uint16_t)((unsigned long) offset & 0xffff);
+ } else {
+ insn[0] = NOP32_HI;
+ insn[1] = NOP32_LO;
+ }
+
+ ret = copy_to_kernel_nofault((void *)addr, insn, 4);
+ WARN_ON(ret);
+
+ flush_icache_range(addr, addr + 4);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use the same instructions in the arch_static_branch and
+ * arch_static_branch_jump inline functions, so there's no
+ * need to patch them up here.
+ * The core will call arch_jump_label_transform when those
+ * instructions need to be replaced.
+ */
+ arch_jump_label_transform(entry, type);
+}
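
For reference, the bsr32 encoding built above splits a halfword-unit
displacement into a 10-bit high part (or'ed into the 0xe000 opcode) and a
16-bit low part, which is why the offset must be even and within +/-2^26
bytes. A small stand-alone sketch of the same math (sample offset chosen
arbitrarily):

    #include <stdint.h>
    #include <stdio.h>

    #define BSR_LINK 0xe000

    int main(void)
    {
            long offset = 0x2000;   /* target - code, in bytes */
            uint16_t insn[2];

            offset >>= 1;           /* bsr counts halfwords */
            insn[0] = BSR_LINK | (((unsigned long)offset >> 16) & 0x3ff);
            insn[1] = (unsigned long)offset & 0xffff;

            printf("%04x %04x\n", insn[0], insn[1]); /* prints: e000 1000 */
            return 0;
    }
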
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
index b5ad7d9de18c..0b56a8cd12a3 100644
--- a/arch/csky/kernel/module.c
+++ b/arch/csky/kernel/module.c
@@ -10,7 +10,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_CPU_CK810
#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
@@ -41,7 +40,7 @@ static void jsri_2_lrw_jsr(uint32_t *location)
}
}
#else
-static void inline jsri_2_lrw_jsr(uint32_t *location)
+static inline void jsri_2_lrw_jsr(uint32_t *location)
{
return;
}
@@ -69,7 +68,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
*location = rel[i].r_addend + sym->st_value;
break;
case R_CSKY_PC32:
- /* Add the value, subtract its postition */
+ /* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value
- (uint32_t)location;
break;
diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
index e68ff375c8f8..1612f4354087 100644
--- a/arch/csky/kernel/perf_callchain.c
+++ b/arch/csky/kernel/perf_callchain.c
@@ -12,12 +12,17 @@ struct stackframe {
static int unwind_frame_kernel(struct stackframe *frame)
{
- if (kstack_end((void *)frame->fp))
+ unsigned long low = (unsigned long)task_stack_page(current);
+ unsigned long high = low + THREAD_SIZE;
+
+ if (unlikely(frame->fp < low || frame->fp > high))
return -EPERM;
- if (frame->fp & 0x3 || frame->fp < TASK_SIZE)
+
+ if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
return -EPERM;
*frame = *(struct stackframe *)frame->fp;
+
if (__kernel_text_address(frame->lr)) {
int graph = 0;
@@ -44,7 +49,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
{
struct stackframe buftail;
unsigned long lr = 0;
- unsigned long *user_frame_tail = (unsigned long *)fp;
+ unsigned long __user *user_frame_tail = (unsigned long __user *)fp;
/* Check accessibility of one struct frame_tail beyond */
if (!access_ok(user_frame_tail, sizeof(buftail)))
@@ -83,10 +88,6 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
{
unsigned long fp = 0;
- /* C-SKY does not support virtualization. */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return;
-
fp = regs->regs[4];
perf_callchain_store(entry, regs->pc);
@@ -107,12 +108,6 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
{
struct stackframe fr;
- /* C-SKY does not support virtualization. */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- pr_warn("C-SKY does not support perf in guest mode!");
- return;
- }
-
fr.fp = regs->regs[4];
fr.lr = regs->lr;
walk_stackframe(&fr, entry);
diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c
index 1a29f1157449..e5f18420ce64 100644
--- a/arch/csky/kernel/perf_event.c
+++ b/arch/csky/kernel/perf_event.c
@@ -87,7 +87,7 @@ static int csky_pmu_irq;
})
/* cycle counter */
-static uint64_t csky_pmu_read_cc(void)
+uint64_t csky_pmu_read_cc(void)
{
uint32_t lo, hi, tmp;
uint64_t result;
@@ -1319,7 +1319,7 @@ int csky_pmu_device_probe(struct platform_device *pdev,
pr_notice("[perf] PMU request irq fail!\n");
}
- ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "AP_PERF_ONLINE",
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE",
csky_pmu_starting_cpu,
csky_pmu_dying_cpu);
if (ret) {
diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c
index eb32838b8210..09b7f88a2d6a 100644
--- a/arch/csky/kernel/perf_regs.c
+++ b/arch/csky/kernel/perf_regs.c
@@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/arch/csky/kernel/power.c b/arch/csky/kernel/power.c
index 923ee4e381b8..86ee202906f8 100644
--- a/arch/csky/kernel/power.c
+++ b/arch/csky/kernel/power.c
@@ -9,16 +9,14 @@ EXPORT_SYMBOL(pm_power_off);
void machine_power_off(void)
{
local_irq_disable();
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
asm volatile ("bkpt");
}
void machine_halt(void)
{
local_irq_disable();
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
asm volatile ("bkpt");
}
diff --git a/arch/csky/kernel/probes/Makefile b/arch/csky/kernel/probes/Makefile
new file mode 100644
index 000000000000..1c7c6e6cb25b
--- /dev/null
+++ b/arch/csky/kernel/probes/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
+obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
+obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
+
+CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/probes/decode-insn.c b/arch/csky/kernel/probes/decode-insn.c
new file mode 100644
index 000000000000..bbc4edc25dc9
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+/* Return:
+ * INSN_REJECTED If instruction is one not allowed to kprobe,
+ * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
+ */
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
+{
+ probe_opcode_t insn = le32_to_cpu(*addr);
+
+ CSKY_INSN_SET_SIMULATE(br16, insn);
+ CSKY_INSN_SET_SIMULATE(bt16, insn);
+ CSKY_INSN_SET_SIMULATE(bf16, insn);
+ CSKY_INSN_SET_SIMULATE(jmp16, insn);
+ CSKY_INSN_SET_SIMULATE(jsr16, insn);
+ CSKY_INSN_SET_SIMULATE(lrw16, insn);
+ CSKY_INSN_SET_SIMULATE(pop16, insn);
+
+ CSKY_INSN_SET_SIMULATE(br32, insn);
+ CSKY_INSN_SET_SIMULATE(bt32, insn);
+ CSKY_INSN_SET_SIMULATE(bf32, insn);
+ CSKY_INSN_SET_SIMULATE(jmp32, insn);
+ CSKY_INSN_SET_SIMULATE(jsr32, insn);
+ CSKY_INSN_SET_SIMULATE(lrw32, insn);
+ CSKY_INSN_SET_SIMULATE(pop32, insn);
+
+ CSKY_INSN_SET_SIMULATE(bez32, insn);
+ CSKY_INSN_SET_SIMULATE(bnez32, insn);
+ CSKY_INSN_SET_SIMULATE(bnezad32, insn);
+ CSKY_INSN_SET_SIMULATE(bhsz32, insn);
+ CSKY_INSN_SET_SIMULATE(bhz32, insn);
+ CSKY_INSN_SET_SIMULATE(blsz32, insn);
+ CSKY_INSN_SET_SIMULATE(blz32, insn);
+ CSKY_INSN_SET_SIMULATE(bsr32, insn);
+ CSKY_INSN_SET_SIMULATE(jmpi32, insn);
+ CSKY_INSN_SET_SIMULATE(jsri32, insn);
+
+ return INSN_GOOD;
+}
diff --git a/arch/csky/kernel/probes/decode-insn.h b/arch/csky/kernel/probes/decode-insn.h
new file mode 100644
index 000000000000..9c4ad48fee0d
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+
+#include <asm/sections.h>
+#include <asm/kprobes.h>
+
+enum probe_insn {
+ INSN_REJECTED,
+ INSN_GOOD_NO_SLOT,
+ INSN_GOOD,
+};
+
+#define is_insn32(insn) ((insn & 0xc000) == 0xc000)
+
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
+
+#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..834cffcfbce3
--- /dev/null
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kprobes.h>
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ int bit;
+ bool lr_saver = false;
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+ struct pt_regs *regs;
+
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ regs = ftrace_get_regs(fregs);
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (!p) {
+ p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto out;
+ lr_saver = true;
+ }
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(regs);
+
+ if (lr_saver)
+ ip -= MCOUNT_INSN_SIZE;
+ instruction_pointer_set(regs, ip);
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ /*
+ * Emulate singlestep (and also recover regs->pc)
+ * as if there is a nop
+ */
+ instruction_pointer_set(regs,
+ (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ instruction_pointer_set(regs, orig_ip);
+ }
+ /*
+ * If pre_handler returns !0, it changes regs->pc. We have to
+ * skip emulating post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
+ }
+out:
+ ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.api.insn = NULL;
+ return 0;
+}
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
new file mode 100644
index 000000000000..3c6e5c725d81
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#define pr_fmt(fmt) "kprobes: " fmt
+
+#include <linux/kprobes.h>
+#include <linux/extable.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
+struct csky_insn_patch {
+ kprobe_opcode_t *addr;
+ u32 opcode;
+ atomic_t cpu_count;
+};
+
+static int __kprobes patch_text_cb(void *priv)
+{
+ struct csky_insn_patch *param = priv;
+ unsigned int addr = (unsigned int)param->addr;
+
+ if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+ *(u16 *) addr = cpu_to_le16(param->opcode);
+ dcache_wb_range(addr, addr + 2);
+ atomic_inc(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ }
+
+ icache_inv_range(addr, addr + 2);
+
+ return 0;
+}
+
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+ struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
+
+ return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
+}
+
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+ unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+ p->ainsn.api.restore = (unsigned long)p->addr + offset;
+
+ patch_text(p->ainsn.api.insn, p->opcode);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+ p->ainsn.api.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (p->ainsn.api.handler)
+ p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
+
+ post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long probe_addr = (unsigned long)p->addr;
+
+ if (probe_addr & 0x1)
+ return -EILSEQ;
+
+ /* copy instruction */
+ p->opcode = le32_to_cpu(*p->addr);
+
+ /* decode instruction */
+ switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
+ case INSN_REJECTED: /* insn not supported */
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT: /* insn need simulation */
+ p->ainsn.api.insn = NULL;
+ break;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.api.insn = get_insn_slot();
+ if (!p->ainsn.api.insn)
+ return -ENOMEM;
+ break;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.api.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+
+/* install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, USR_BKPT);
+}
+
+/* remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.api.insn) {
+ free_insn_slot(p->ainsn.api.insn, 0);
+ p->ainsn.api.insn = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without disabling interrupt on local CPU, there is a chance of
+ * interrupt occurrence in the period of exception return and start of
+ * out-of-line single-step, which results in wrongly single-stepping
+ * into the interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_sr = regs->sr;
+ regs->sr &= ~BIT(6);
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ regs->sr = kcb->saved_sr;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
+{
+ unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + offset;
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+#define TRACE_MODE_SI BIT(14)
+#define TRACE_MODE_MASK ~(0x3 << 14)
+#define TRACE_MODE_RUN 0
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.api.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.api.insn;
+
+ set_ss_context(kcb, slot, p); /* mark pending ss */
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+ instruction_pointer_set(regs, slot);
+ } else {
+ /* insn simulation */
+ arch_simulate_insn(p, regs);
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Failed to recover from reentered kprobes.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.api.restore != 0)
+ regs->pc = cur->ainsn.api.restore;
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler) {
+ /* post_handler can hit breakpoint and single step
+ * again, so we enable D-flag for recursive exception.
+ */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the ip points back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->pc = (unsigned long) cur->addr;
+ BUG_ON(!instruction_pointer(regs));
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+ }
+ return 0;
+}
+
+int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur_kprobe;
+ struct kprobe_ctlblk *kcb;
+ unsigned long addr = instruction_pointer(regs);
+
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe((kprobe_opcode_t *) addr);
+
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return 1;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it will
+ * modify the execution path and no need to single
+ * stepping. Let's just reset current kprobe and exit.
+ *
+ * pre_handler can hit a breakpoint and can step thru
+ * before return.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
+ }
+ return 1;
+ }
+
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Return back to original instruction, and continue.
+ */
+ return 0;
+}
+
+int __kprobes
+kprobe_single_step_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if ((kcb->ss_ctx.ss_pending)
+ && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
+ clear_ss_context(kcb); /* clear pending ss */
+
+ kprobes_restore_local_irqflag(kcb, regs);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+ post_kprobe_handler(kcb, regs);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ int ret;
+
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ return ret;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ return (void *)kretprobe_trampoline_handler(regs, NULL);
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->lr;
+ ri->fp = NULL;
+ regs->lr = (unsigned long) &__kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
diff --git a/arch/csky/kernel/probes/kprobes_trampoline.S b/arch/csky/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000000..ba48ad04a847
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#include <linux/linkage.h>
+
+#include <abi/entry.h>
+
+ENTRY(__kretprobe_trampoline)
+ SAVE_REGS_FTRACE
+
+ mov a0, sp /* pt_regs */
+
+ jbsr trampoline_probe_handler
+
+ /* use the result as the return-address */
+ mov lr, a0
+
+ RESTORE_REGS_FTRACE
+ rts
+ENDPROC(__kretprobe_trampoline)
diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000000..d6e8d092c9b7
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+static inline bool csky_insn_reg_get_val(struct pt_regs *regs,
+ unsigned long index,
+ unsigned long *ptr)
+{
+ if (index < 14)
+ *ptr = *(&regs->a0 + index);
+
+ if (index > 15 && index < 31)
+ *ptr = *(&regs->exregs[0] + index - 16);
+
+ switch (index) {
+ case 14:
+ *ptr = regs->usp;
+ break;
+ case 15:
+ *ptr = regs->lr;
+ break;
+ case 31:
+ *ptr = regs->tls;
+ break;
+ default:
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static inline bool csky_insn_reg_set_val(struct pt_regs *regs,
+ unsigned long index,
+ unsigned long val)
+{
+ if (index < 14)
+ *(&regs->a0 + index) = val;
+
+ if (index > 15 && index < 31)
+ *(&regs->exregs[0] + index - 16) = val;
+
+ switch (index) {
+ case 14:
+ regs->usp = val;
+ break;
+ case 15:
+ regs->lr = val;
+ break;
+ case 31:
+ regs->tls = val;
+ break;
+ default:
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+void __kprobes
+simulate_br16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+}
+
+void __kprobes
+simulate_br32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+}
+
+void __kprobes
+simulate_bt16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (regs->sr & 1)
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+ else
+ instruction_pointer_set(regs, addr + 2);
+}
+
+void __kprobes
+simulate_bt32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (regs->sr & 1)
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bf16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (!(regs->sr & 1))
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+ else
+ instruction_pointer_set(regs, addr + 2);
+}
+
+void __kprobes
+simulate_bf32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (!(regs->sr & 1))
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = (opcode >> 2) & 0xf;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = (opcode >> 2) & 0xf;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ regs->lr = addr + 2;
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ regs->lr = addr + 4;
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long tmp = (opcode & 0x300) >> 3;
+ unsigned long offset = ((opcode & 0x1f) | tmp) << 2;
+
+ tmp = (opcode & 0xe0) >> 5;
+
+ val = *(unsigned int *)(instruction_pointer(regs) + offset);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = (opcode & 0xffff0000) >> 14;
+ unsigned long tmp = opcode & 0x0000001f;
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_pop16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long *tmp = (unsigned long *)regs->usp;
+ int i;
+
+ for (i = 0; i < (opcode & 0xf); i++) {
+ csky_insn_reg_set_val(regs, i + 4, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x10) {
+ csky_insn_reg_set_val(regs, 15, *tmp);
+ tmp += 1;
+ }
+
+ regs->usp = (unsigned long)tmp;
+
+ instruction_pointer_set(regs, regs->lr);
+}
+
+void __kprobes
+simulate_pop32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long *tmp = (unsigned long *)regs->usp;
+ int i;
+
+ for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) {
+ csky_insn_reg_set_val(regs, i + 4, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x100000) {
+ csky_insn_reg_set_val(regs, 15, *tmp);
+ tmp += 1;
+ }
+
+ for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) {
+ csky_insn_reg_set_val(regs, i + 16, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x1000000) {
+ csky_insn_reg_set_val(regs, 29, *tmp);
+ tmp += 1;
+ }
+
+ regs->usp = (unsigned long)tmp;
+
+ instruction_pointer_set(regs, regs->lr);
+}
+
+void __kprobes
+simulate_bez32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ if (tmp == 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ if (tmp != 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ long val;
+
+ csky_insn_reg_get_val(regs, tmp, (unsigned long *)&val);
+
+ val -= 1;
+
+ if (val > 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+
+ csky_insn_reg_set_val(regs, tmp, (unsigned long)val);
+}
+
+void __kprobes
+simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val >= 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val > 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val <= 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val < 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp;
+
+ tmp = (opcode & 0xffff) << 16;
+ tmp |= (opcode & 0xffff0000) >> 16;
+
+ instruction_pointer_set(regs,
+ addr + sign_extend32((tmp & 0x3ffffff) << 1, 15));
+
+ regs->lr = addr + 4;
+}
+
+void __kprobes
+simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ instruction_pointer_set(regs, val);
+}
+
+void __kprobes
+simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ regs->lr = addr + 4;
+
+ instruction_pointer_set(regs, val);
+}
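
A worked example of the displacement math used throughout these helpers:
the 16-bit immediate sits in the high halfword, and shifting right by 15
(rather than 16) folds in the multiply-by-two, since C-SKY branch
displacements are in halfword units. For instance, for a bt32 word of
0x0010e860 (imm16 = 0x0010):

    /* (0x0010e860 & 0xffff0000) >> 15 == 0x0010 << 1 == 32
     * sign_extend32(32, 15) == 32
     * => branch target = addr + 32 bytes
     */
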
diff --git a/arch/csky/kernel/probes/simulate-insn.h b/arch/csky/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000000..ba4cb7ef062e
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+
+#define __CSKY_INSN_FUNCS(name, mask, val) \
+static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+void simulate_##name(u32 opcode, long addr, struct pt_regs *regs);
+
+#define CSKY_INSN_SET_SIMULATE(name, code) \
+ do { \
+ if (csky_insn_is_##name(code)) { \
+ api->handler = simulate_##name; \
+ return INSN_GOOD_NO_SLOT; \
+ } \
+ } while (0)
+
+__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400)
+__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800)
+__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00)
+__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800)
+__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801)
+__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000)
+__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480)
+
+__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800)
+__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860)
+__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840)
+__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0)
+__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0)
+__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80)
+__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0)
+
+__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900)
+__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920)
+__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820)
+__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0)
+__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940)
+__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960)
+__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980)
+__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000)
+__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0)
+__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0)
+
+#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */
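
Note that the 32-bit patterns test the low 16 bits of the word: the decoder
reads the instruction with le32_to_cpu(), so the first (major-opcode)
halfword appears to land in bits [15:0] and the immediate halfword in bits
[31:16] -- consistent with the halfword swap done in simulate_bsr32(). A
minimal sketch of the matching idiom, assuming that layout:

    #include <stdbool.h>
    #include <stdint.h>

    /* mirrors __CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000) */
    static bool is_bsr32(uint32_t code)
    {
            return (code & 0x0000fc00) == 0x0000e000;
    }

    /* e.g. is_bsr32(0x1000e123) is true: low halfword 0xe123 carries the
     * bsr major opcode; 0x1000 is part of the displacement. */
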
diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c
new file mode 100644
index 000000000000..936bea6fd32d
--- /dev/null
+++ b/arch/csky/kernel/probes/uprobes.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ */
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+bool is_swbp_insn(uprobe_opcode_t *insn)
+{
+ return (*insn & 0xffff) == UPROBE_SWBP_INSN;
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ probe_opcode_t insn;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+ auprobe->insn_size = is_insn32(insn) ? 4 : 2;
+
+ switch (csky_probe_decode_insn(&insn, &auprobe->api)) {
+ case INSN_REJECTED:
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT:
+ auprobe->simulate = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ utask->autask.saved_trap_no = current->thread.trap_no;
+ current->thread.trap_no = UPROBE_TRAP_NR;
+
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ user_enable_single_step(current);
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
+ current->thread.trap_no = utask->autask.saved_trap_no;
+
+ instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
+
+ user_disable_single_step(current);
+
+ return 0;
+}
+
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.trap_no != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ probe_opcode_t insn;
+ unsigned long addr;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+ auprobe->api.handler(insn, addr, regs);
+
+ return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_no = utask->autask.saved_trap_no;
+
+ /*
+ * Task has received a fatal signal, so reset back to probed
+ * address.
+ */
+ instruction_pointer_set(regs, utask->vaddr);
+
+ user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->usp <= ret->stack;
+ else
+ return regs->usp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->lr;
+
+ regs->lr = trampoline_vaddr;
+
+ return ra;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+int uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ if (uprobe_pre_sstep_notifier(regs))
+ return 1;
+
+ return 0;
+}
+
+int uprobe_single_step_handler(struct pt_regs *regs)
+{
+ if (uprobe_post_sstep_notifier(regs))
+ return 1;
+
+ return 0;
+}
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index f320d9248a22..0c6e4b17fe00 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -2,7 +2,6 @@
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
@@ -10,12 +9,19 @@
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+#include <linux/elfcore.h>
#include <asm/elf.h>
#include <abi/reg_ops.h>
struct cpuinfo_csky cpu_data[NR_CPUS];
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
@@ -24,21 +30,11 @@ asmlinkage void ret_from_kernel_thread(void);
*/
void flush_thread(void){}
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
-
- return sw->r15;
-}
-
-int copy_thread(unsigned long clone_flags,
- unsigned long usp,
- unsigned long kthread_arg,
- struct task_struct *p)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct switch_stack *childstack;
struct pt_regs *childregs = task_pt_regs(p);
@@ -49,14 +45,14 @@ int copy_thread(unsigned long clone_flags,
childstack = ((struct switch_stack *) childregs) - 1;
memset(childstack, 0, sizeof(struct switch_stack));
- /* setup ksp for switch_to !!! */
- p->thread.ksp = (unsigned long)childstack;
+ /* setup thread.sp for switch_to !!! */
+ p->thread.sp = (unsigned long)childstack;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(args->fn)) {
memset(childregs, 0, sizeof(struct pt_regs));
childstack->r15 = (unsigned long) ret_from_kernel_thread;
- childstack->r10 = kthread_arg;
- childstack->r9 = usp;
+ childstack->r10 = (unsigned long) args->fn_arg;
+ childstack->r9 = (unsigned long) args->fn;
childregs->sr = mfcr("psr");
} else {
*childregs = *(current_pt_regs());
@@ -64,7 +60,7 @@ int copy_thread(unsigned long clone_flags,
childregs->usp = usp;
if (clone_flags & CLONE_SETTLS)
task_thread_info(p)->tp_value = childregs->tls
- = childregs->regs[0];
+ = tls;
childregs->a0 = 0;
childstack->r15 = (unsigned long) ret_from_fork;
@@ -74,12 +70,11 @@ int copy_thread(unsigned long clone_flags,
}
/* Fill in the fpu structure for a core dump. */
-int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
+int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
memcpy(fpu, &current->thread.user_fp, sizeof(*fpu));
return 1;
}
-EXPORT_SYMBOL(dump_fpu);
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
{
@@ -91,37 +86,6 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
return 1;
}
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long lr;
- unsigned long *fp, *stack_start, *stack_end;
- int count = 0;
-
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
- stack_start = (unsigned long *)end_of_stack(p);
- stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
-
- fp = (unsigned long *) thread_saved_fp(p);
- do {
- if (fp < stack_start || fp > stack_end)
- return 0;
-#ifdef CONFIG_STACKTRACE
- lr = fp[1];
- fp = (unsigned long *)fp[0];
-#else
- lr = *fp++;
-#endif
- if (!in_sched_functions(lr) &&
- __kernel_text_address(lr))
- return lr;
- } while (count++ < 16);
-
- return 0;
-}
-EXPORT_SYMBOL(get_wchan);
-
#ifndef CONFIG_CPU_PM_NONE
void arch_cpu_idle(void)
{
@@ -136,6 +100,5 @@ void arch_cpu_idle(void)
#ifdef CONFIG_CPU_PM_STOP
asm volatile("stop\n");
#endif
- local_irq_enable();
}
#endif
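
In the reworked copy_thread() above, a kernel thread parks its entry function
in r9 and its argument in r10 of the switch stack; ret_from_kernel_thread
later loads the pair and calls fn(arg). A hypothetical userspace sketch of
that hand-off (struct and names invented for illustration):

    #include <stdio.h>

    /* Stand-in for the saved callee registers in struct switch_stack. */
    struct fake_switch_stack {
        void (*fn)(void *);   /* what the diff parks in r9  */
        void *fn_arg;         /* what the diff parks in r10 */
    };

    static void kthread_entry(void *arg)
    {
        printf("kthread arg: %s\n", (const char *)arg);
    }

    int main(void)
    {
        struct fake_switch_stack ss = { kthread_entry, "hello" };

        /* ret_from_kernel_thread's job, reduced to one line. */
        ss.fn(ss.fn_arg);
        return 0;
    }
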
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 313623a19ecb..0f7e7b653c72 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -12,17 +12,16 @@
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/smp.h>
-#include <linux/tracehook.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <abi/regdef.h>
+#include <abi/ckmmu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -41,6 +40,9 @@ static void singlestep_disable(struct task_struct *tsk)
regs = task_pt_regs(tsk);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+ /* Enable irq */
+ regs->sr |= BIT(6);
}
static void singlestep_enable(struct task_struct *tsk)
@@ -49,6 +51,9 @@ static void singlestep_enable(struct task_struct *tsk)
regs = task_pt_regs(tsk);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+
+ /* Disable irq */
+ regs->sr &= ~BIT(6);
}
/*
@@ -71,17 +76,14 @@ enum csky_regset {
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
- struct pt_regs *regs;
-
- regs = task_pt_regs(target);
+ struct pt_regs *regs = task_pt_regs(target);
/* Abiv1 regs->tls is fake and we need sync here. */
regs->tls = task_thread_info(target)->tp_value;
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+ return membuf_write(&to, regs, sizeof(*regs));
}
static int gpr_set(struct task_struct *target,
@@ -96,7 +98,8 @@ static int gpr_set(struct task_struct *target,
if (ret)
return ret;
- regs.sr = task_pt_regs(target)->sr;
+ /* BIT(0) of regs.sr is Condition Code/Carry bit */
+ regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0));
#ifdef CONFIG_CPU_HAS_HILO
regs.dcsr = task_pt_regs(target)->dcsr;
#endif
@@ -109,8 +112,7 @@ static int gpr_set(struct task_struct *target,
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
@@ -126,9 +128,9 @@ static int fpr_get(struct task_struct *target,
for (i = 0; i < 32; i++)
tmp.vr[64 + i] = regs->vr[32 + i];
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1);
+ return membuf_write(&to, &tmp, sizeof(tmp));
#else
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+ return membuf_write(&to, regs, sizeof(*regs));
#endif
}
@@ -168,16 +170,16 @@ static const struct user_regset csky_regsets[] = {
.n = sizeof(struct pt_regs) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
- .get = &gpr_get,
- .set = &gpr_set,
+ .regset_get = gpr_get,
+ .set = gpr_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fp) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
- .get = &fpr_get,
- .set = &fpr_set,
+ .regset_get = fpr_get,
+ .set = fpr_set,
},
};
@@ -193,6 +195,109 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_csky_view;
}
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(tls),
+ REG_OFFSET_NAME(lr),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(usp),
+ REG_OFFSET_NAME(orig_a0),
+ REG_OFFSET_NAME(a0),
+ REG_OFFSET_NAME(a1),
+ REG_OFFSET_NAME(a2),
+ REG_OFFSET_NAME(a3),
+ REG_OFFSET_NAME(regs[0]),
+ REG_OFFSET_NAME(regs[1]),
+ REG_OFFSET_NAME(regs[2]),
+ REG_OFFSET_NAME(regs[3]),
+ REG_OFFSET_NAME(regs[4]),
+ REG_OFFSET_NAME(regs[5]),
+ REG_OFFSET_NAME(regs[6]),
+ REG_OFFSET_NAME(regs[7]),
+ REG_OFFSET_NAME(regs[8]),
+ REG_OFFSET_NAME(regs[9]),
+#if defined(__CSKYABIV2__)
+ REG_OFFSET_NAME(exregs[0]),
+ REG_OFFSET_NAME(exregs[1]),
+ REG_OFFSET_NAME(exregs[2]),
+ REG_OFFSET_NAME(exregs[3]),
+ REG_OFFSET_NAME(exregs[4]),
+ REG_OFFSET_NAME(exregs[5]),
+ REG_OFFSET_NAME(exregs[6]),
+ REG_OFFSET_NAME(exregs[7]),
+ REG_OFFSET_NAME(exregs[8]),
+ REG_OFFSET_NAME(exregs[9]),
+ REG_OFFSET_NAME(exregs[10]),
+ REG_OFFSET_NAME(exregs[11]),
+ REG_OFFSET_NAME(exregs[12]),
+ REG_OFFSET_NAME(exregs[13]),
+ REG_OFFSET_NAME(exregs[14]),
+ REG_OFFSET_NAME(rhi),
+ REG_OFFSET_NAME(rlo),
+ REG_OFFSET_NAME(dcsr),
+#endif
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack
+ * page(s). It returns true if @addr is within the kernel stack, false if not.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return (addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
+ * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);
@@ -212,16 +317,20 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-asmlinkage void syscall_trace_enter(struct pt_regs *regs)
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
- if (tracehook_report_syscall_entry(regs))
- syscall_set_nr(current, regs, -1);
+ if (ptrace_report_syscall_entry(regs))
+ return -1;
+
+ if (secure_computing() == -1)
+ return -1;
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3);
+ return 0;
}
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
@@ -229,19 +338,132 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
audit_syscall_exit(regs);
if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
+ ptrace_report_syscall_exit(regs, 0);
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, syscall_get_return_value(current, regs));
}
-extern void show_stack(struct task_struct *task, unsigned long *stack);
-void show_regs(struct pt_regs *fp)
+#ifdef CONFIG_CPU_CK860
+static void show_iutlb(void)
{
- unsigned long *sp;
- unsigned char *tp;
- int i;
+ int entry, i;
+ unsigned long flags;
+ unsigned long oldpid;
+ unsigned long entryhi[16], entrylo0[16], entrylo1[16];
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0x8000;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < 16; i++) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[i] = read_mmu_entryhi();
+ entrylo0[i] = read_mmu_entrylo0();
+ entrylo1[i] = read_mmu_entrylo1();
+
+ entry++;
+ }
+
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+ for (i = 0; i < 16; i++)
+ printk("iutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ i, entryhi[i], entrylo0[i], entrylo1[i]);
+ printk("\n\n\n");
+}
+static void show_dutlb(void)
+{
+ int entry, i;
+ unsigned long flags;
+ unsigned long oldpid;
+ unsigned long entryhi[16], entrylo0[16], entrylo1[16];
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0x4000;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < 16; i++) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[i] = read_mmu_entryhi();
+ entrylo0[i] = read_mmu_entrylo0();
+ entrylo1[i] = read_mmu_entrylo1();
+
+ entry++;
+ }
+
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+ for (i = 0; i < 16; i++)
+ printk("dutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ i, entryhi[i], entrylo0[i], entrylo1[i]);
+ printk("\n\n\n");
+}
+
+static unsigned long entryhi[1024], entrylo0[1024], entrylo1[1024];
+static void show_jtlb(void)
+{
+ int entry;
+ unsigned long flags;
+ unsigned long oldpid;
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0;
+
+ local_irq_save(flags);
+ while (entry < 1024) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[entry] = read_mmu_entryhi();
+ entrylo0[entry] = read_mmu_entrylo0();
+ entrylo1[entry] = read_mmu_entrylo1();
+
+ entry++;
+ }
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+
+ for (entry = 0; entry < 1024; entry++)
+ printk("jtlb[%x]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ entry, entryhi[entry], entrylo0[entry], entrylo1[entry]);
+ printk("\n\n\n");
+}
+
+static void show_tlb(void)
+{
+ show_iutlb();
+ show_dutlb();
+ show_jtlb();
+}
+#else
+static void show_tlb(void)
+{
+ return;
+}
+#endif
+
+void show_regs(struct pt_regs *fp)
+{
pr_info("\nCURRENT PROCESS:\n\n");
pr_info("COMM=%s PID=%d\n", current->comm, current->pid);
@@ -260,9 +482,10 @@ void show_regs(struct pt_regs *fp)
pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc);
pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr);
- pr_info("SP: 0x%08lx\n", (long)fp);
- pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
+ pr_info("SP: 0x%08lx\n", (long)fp->usp);
pr_info("PSR: 0x%08lx\n", (long)fp->sr);
+ pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
+ pr_info("PT_REGS: 0x%08lx\n", (long)fp);
pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n",
fp->a0, fp->a1, fp->a2, fp->a3);
@@ -288,29 +511,11 @@ void show_regs(struct pt_regs *fp)
fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]);
pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n",
fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]);
- pr_info("r14: 0x%08lx r1: 0x%08lx r15: 0x%08lx\n",
- fp->regs[8], fp->regs[9], fp->lr);
+ pr_info("r14: 0x%08lx r1: 0x%08lx\n",
+ fp->regs[8], fp->regs[9]);
#endif
- pr_info("\nCODE:");
- tp = ((unsigned char *) fp->pc) - 0x20;
- tp += ((int)tp % 4) ? 2 : 0;
- for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
- if ((i % 0x10) == 0)
- pr_cont("\n%08x: ", (int) (tp + i));
- pr_cont("%08x ", (int) *sp++);
- }
- pr_cont("\n");
-
- pr_info("\nKERNEL STACK:");
- tp = ((unsigned char *) fp) - 0x40;
- for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
- if ((i % 0x10) == 0)
- pr_cont("\n%08x: ", (int) (tp + i));
- pr_cont("%08x ", (int) *sp++);
- }
- pr_cont("\n");
+ show_tlb();
- show_stack(NULL, (unsigned long *)fp->regs[4]);
return;
}
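
The regoffset_table added above is the usual kprobes helper pattern: each
pt_regs field is stringified together with its offsetof() so tracing code can
resolve a register name to a byte offset at runtime. A self-contained sketch
of the same pattern on a made-up register struct:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_regs { unsigned long pc, lr, usp; };

    struct reg_off { const char *name; int offset; };
    #define REG_OFFSET_NAME(r) { #r, offsetof(struct demo_regs, r) }

    static const struct reg_off table[] = {
        REG_OFFSET_NAME(pc), REG_OFFSET_NAME(lr), REG_OFFSET_NAME(usp),
        { NULL, 0 },
    };

    static int query_offset(const char *name)
    {
        for (const struct reg_off *r = table; r->name; r++)
            if (!strcmp(r->name, name))
                return r->offset;
        return -1; /* the kernel version returns -EINVAL */
    }

    int main(void)
    {
        printf("usp at offset %d\n", query_offset("usp"));
        return 0;
    }
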
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index 23ee604aafdb..51012e90780d 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -7,104 +7,54 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/start_kernel.h>
-#include <linux/dma-contiguous.h>
-#include <linux/screen_info.h>
+#include <linux/dma-map-ops.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#ifdef CONFIG_DUMMY_CONSOLE
-struct screen_info screen_info = {
- .orig_video_lines = 30,
- .orig_video_cols = 80,
- .orig_video_mode = 0,
- .orig_video_ega_bx = 0,
- .orig_video_isVGA = 1,
- .orig_video_points = 8
-};
-#endif
-
-phys_addr_t __init_memblock memblock_end_of_REG0(void)
-{
- return (memblock.memory.regions[0].base +
- memblock.memory.regions[0].size);
-}
-
-phys_addr_t __init_memblock memblock_start_of_REG1(void)
-{
- return memblock.memory.regions[1].base;
-}
-
-size_t __init_memblock memblock_size_of_REG1(void)
-{
- return memblock.memory.regions[1].size;
-}
-
static void __init csky_memblock_init(void)
{
- unsigned long zone_size[MAX_NR_ZONES];
- unsigned long zhole_size[MAX_NR_ZONES];
+ unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
+ unsigned long sseg_size = PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
signed long size;
- memblock_reserve(__pa(_stext), _end - _stext);
-#ifdef CONFIG_BLK_DEV_INITRD
- memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
-#endif
+ memblock_reserve(__pa(_start), _end - _start);
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
memblock_dump_all();
- memset(zone_size, 0, sizeof(zone_size));
- memset(zhole_size, 0, sizeof(zhole_size));
-
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
- max_pfn = PFN_DOWN(memblock_end_of_DRAM());
-
- max_low_pfn = PFN_UP(memblock_end_of_REG0());
- if (max_low_pfn == 0)
- max_low_pfn = max_pfn;
+ max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM());
size = max_pfn - min_low_pfn;
- if (memblock.memory.cnt > 1) {
- zone_size[ZONE_NORMAL] =
- PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn;
- zhole_size[ZONE_NORMAL] =
- PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn;
- } else {
- if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET))
- zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn;
- else {
- zone_size[ZONE_NORMAL] =
- PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
- max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
- }
+ if (size >= lowmem_size) {
+ max_low_pfn = min_low_pfn + lowmem_size;
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
+#endif
+ } else if (size > sseg_size) {
+ max_low_pfn = min_low_pfn + sseg_size;
}
-#ifdef CONFIG_HIGHMEM
- size = 0;
- if (memblock.memory.cnt > 1) {
- size = PFN_DOWN(memblock_size_of_REG1());
- highstart_pfn = PFN_DOWN(memblock_start_of_REG1());
- } else {
- size = max_pfn - min_low_pfn -
- PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
- highstart_pfn = min_low_pfn +
- PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
- }
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
- if (size > 0)
- zone_size[ZONE_HIGHMEM] = size;
+ mmu_init(min_low_pfn, max_low_pfn);
- highend_pfn = max_pfn;
+#ifdef CONFIG_HIGHMEM
+ max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
+
+ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
#endif
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(0);
- free_area_init_node(0, zone_size, min_low_pfn, zhole_size);
+ free_area_init(max_zone_pfn);
}
void __init setup_arch(char **cmdline_p)
@@ -116,10 +66,7 @@ void __init setup_arch(char **cmdline_p)
pr_info("Phys. mem: %ldMB\n",
(unsigned long) memblock_phys_mem_size()/1024/1024);
- init_mm.start_code = (unsigned long) _stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_start, _etext, _edata, _end);
parse_early_param();
@@ -133,28 +80,36 @@ void __init setup_arch(char **cmdline_p)
sparse_init();
+ fixaddr_init();
+
#ifdef CONFIG_HIGHMEM
kmap_init();
#endif
-
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-#endif
}
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
+static inline unsigned long read_mmu_msa(void)
+{
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ return read_mmu_msa0();
+#endif
+
+#ifdef CONFIG_PAGE_OFFSET_A0000000
+ return read_mmu_msa1();
+#endif
+}
+
asmlinkage __visible void __init csky_start(unsigned int unused,
void *dtb_start)
{
/* Clean up bss section */
memset(__bss_start, 0, __bss_stop - __bss_start);
- va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1);
+ va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1);
pre_trap_init();
- pre_mmu_init();
if (dtb_start == NULL)
early_init_dt_scan(__dtb_start);
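
The rewritten csky_memblock_init() drops the per-region zone/hole bookkeeping
in favour of the free_area_init(max_zone_pfn) interface, which only needs the
highest PFN of each zone. A toy version of the lowmem split, under assumed
sizes (1 GiB of DRAM, a 768 MiB lowmem window, 4 KiB pages):

    #include <stdio.h>

    int main(void)
    {
        unsigned long min_low_pfn = 0;                   /* assumed DRAM base */
        unsigned long lowmem_pfns = (768UL << 20) >> 12;
        unsigned long max_pfn     = (1024UL << 20) >> 12;
        unsigned long max_low_pfn = max_pfn;

        if (max_pfn - min_low_pfn >= lowmem_pfns)
            max_low_pfn = min_low_pfn + lowmem_pfns;     /* rest is HIGHMEM */

        printf("ZONE_NORMAL up to pfn %#lx, ZONE_HIGHMEM up to pfn %#lx\n",
               max_low_pfn, max_pfn);
        return 0;
    }
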
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 9b1b7c039ddf..10da0fefd431 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -3,7 +3,7 @@
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
-#include <linux/tracehook.h>
+#include <linux/resume_user_mode.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
@@ -52,10 +52,14 @@ static long restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
int err = 0;
+ unsigned long sr = regs->sr;
/* sc_pt_regs is structured the same as the start of pt_regs */
err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs));
+ /* BIT(0) of regs->sr is Condition Code/Carry bit */
+ regs->sr = (sr & ~1) | (regs->sr & 1);
+
/* Restore the floating-point state. */
err |= restore_fpu_state(sc);
@@ -132,9 +136,8 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
- struct rt_sigframe *frame;
+ struct rt_sigframe __user *frame;
int err = 0;
- struct csky_vdso *vdso = current->mm->context.vdso;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
@@ -152,7 +155,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
return -EFAULT;
/* Set up to return from userspace. */
- regs->lr = (unsigned long)(vdso->rt_signal_retcode);
+ regs->lr = (unsigned long)VDSO_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
/*
* Set up registers for signal handler.
@@ -192,7 +196,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->pc -= TRAP0_SIZE;
@@ -251,12 +255,13 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_info_flags)
{
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
/* Handle pending signal delivery */
- if (thread_info_flags & _TIF_SIGPENDING)
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
}
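
restore_sigcontext() now rebuilds sr so that only BIT(0), the Condition
Code/Carry bit, is taken from the user-supplied context while the
kernel-controlled bits are preserved. The bit merge in isolation, with
assumed sample values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long kernel_sr = 0x80000140; /* assumed kernel-owned bits */
        unsigned long user_sr   = 0x00000001; /* user context: carry set   */

        /* Only BIT(0) may come in from userspace. */
        unsigned long sr = (kernel_sr & ~1UL) | (user_sr & 1UL);

        printf("sr = %#lx\n", sr); /* 0x80000141 */
        return 0;
    }
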
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index b753d382e4ce..92dbbf3e0205 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -12,8 +12,10 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
+#include <linux/irq_work.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
+#include <linux/seq_file.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
@@ -21,22 +23,28 @@
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-
-struct ipi_data_struct {
- unsigned long bits ____cacheline_aligned;
-};
-static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
enum ipi_message_type {
IPI_EMPTY,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
+ IPI_IRQ_WORK,
IPI_MAX
};
+struct ipi_data_struct {
+ unsigned long bits ____cacheline_aligned;
+ unsigned long stats[IPI_MAX] ____cacheline_aligned;
+};
+static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);
+
static irqreturn_t handle_ipi(int irq, void *dev)
{
+ unsigned long *stats = this_cpu_ptr(&ipi_data)->stats;
+
while (true) {
unsigned long ops;
@@ -44,11 +52,20 @@ static irqreturn_t handle_ipi(int irq, void *dev)
if (ops == 0)
return IRQ_HANDLED;
- if (ops & (1 << IPI_RESCHEDULE))
+ if (ops & (1 << IPI_RESCHEDULE)) {
+ stats[IPI_RESCHEDULE]++;
scheduler_ipi();
+ }
- if (ops & (1 << IPI_CALL_FUNC))
+ if (ops & (1 << IPI_CALL_FUNC)) {
+ stats[IPI_CALL_FUNC]++;
generic_smp_call_function_interrupt();
+ }
+
+ if (ops & (1 << IPI_IRQ_WORK)) {
+ stats[IPI_IRQ_WORK]++;
+ irq_work_run();
+ }
BUG_ON((ops >> IPI_MAX) != 0);
}
@@ -80,6 +97,29 @@ send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
send_arch_ipi(to_whom);
}
+static const char * const ipi_names[] = {
+ [IPI_EMPTY] = "Empty interrupts",
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_IRQ_WORK] = "Irq work interrupts",
+};
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < IPI_MAX; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10lu ",
+ per_cpu_ptr(&ipi_data, cpu)->stats[i]);
+ seq_printf(p, " %s\n", ipi_names[i]);
+ }
+
+ return 0;
+}
+
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
@@ -100,14 +140,17 @@ void smp_send_stop(void)
on_each_cpu(ipi_stop, NULL, 1);
}
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
-void __init smp_prepare_boot_cpu(void)
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
{
+ send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
+#endif
void __init smp_prepare_cpus(unsigned int max_cpus)
{
@@ -120,7 +163,7 @@ void __init setup_smp_ipi(void)
int rc;
if (ipi_irq == 0)
- panic("%s IRQ mapping failed\n", __func__);
+ return;
rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
&ipi_dummy_dev);
@@ -133,15 +176,13 @@ void __init setup_smp_ipi(void)
void __init setup_smp(void)
{
struct device_node *node = NULL;
- int cpu;
+ unsigned int cpu;
for_each_of_cpu_node(node) {
if (!of_device_is_available(node))
continue;
- if (of_property_read_u32(node, "reg", &cpu))
- continue;
-
+ cpu = of_get_cpu_hwid(node, 0);
if (cpu >= NR_CPUS)
continue;
@@ -153,8 +194,11 @@ void __init setup_smp(void)
extern void _start_smp_secondary(void);
volatile unsigned int secondary_hint;
+volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
+volatile unsigned int secondary_msa1;
+volatile unsigned int secondary_pgd;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
@@ -163,7 +207,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
secondary_stack =
(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
secondary_hint = mfcr("cr31");
+ secondary_hint2 = mfcr("cr<21, 1>");
secondary_ccr = mfcr("cr18");
+ secondary_msa1 = read_mmu_msa1();
+ secondary_pgd = mfcr("cr<29, 15>");
/*
* Because other CPUs are in reset status, we must flush data
@@ -192,25 +239,19 @@ void __init smp_cpus_done(unsigned int max_cpus)
{
}
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void csky_start_secondary(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
mtcr("cr31", secondary_hint);
+ mtcr("cr<21, 1>", secondary_hint2);
mtcr("cr18", secondary_ccr);
mtcr("vbr", vec_base);
flush_tlb_all();
write_mmu_pagemask(0);
- TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
- TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
#ifdef CONFIG_CPU_HAS_FPU
init_fpu();
@@ -229,7 +270,6 @@ void csky_start_secondary(void)
pr_info("CPU%u Online: %s...\n", cpu, __func__);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
@@ -247,25 +287,21 @@ int __cpu_disable(void)
return 0;
}
-void __cpu_die(unsigned int cpu)
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
- if (!cpu_wait_death(cpu, 5)) {
- pr_crit("CPU%u: shutdown failed\n", cpu);
- return;
- }
pr_notice("CPU%u: shutdown\n", cpu);
}
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
idle_task_exit();
- cpu_report_death();
+ cpuhp_ap_report_dead();
while (!secondary_stack)
arch_cpu_idle();
- local_irq_disable();
+ raw_local_irq_disable();
asm volatile(
"mov sp, %0\n"
@@ -273,5 +309,7 @@ void arch_cpu_idle_dead(void)
"jmpi csky_start_secondary"
:
: "r" (secondary_stack));
+
+ BUG();
}
#endif
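
The new ipi_data_struct carries a per-CPU stats[IPI_MAX] array so that
arch_show_interrupts() can render IPI counts in /proc/interrupts. A
hypothetical two-CPU rendering of the same loop, with made-up counters:

    #include <stdio.h>

    enum { IPI_EMPTY, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_IRQ_WORK, IPI_MAX };

    static const char * const ipi_names[] = {
        "Empty interrupts", "Rescheduling interrupts",
        "Function call interrupts", "Irq work interrupts",
    };

    int main(void)
    {
        /* Assumed sample counters for two online CPUs. */
        unsigned long stats[2][IPI_MAX] = { { 0, 12, 3, 1 }, { 0, 9, 4, 0 } };

        for (int i = 0; i < IPI_MAX; i++) {
            printf("IPI%d:", i);
            for (int cpu = 0; cpu < 2; cpu++)
                printf("%10lu ", stats[cpu][i]);
            printf(" %s\n", ipi_names[i]);
        }
        return 0;
    }
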
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index fec777a643f1..27ecd63e321b 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -1,57 +1,156 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
+#include <linux/ptrace.h>
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ bool (*fn)(unsigned long, void *), void *arg)
{
- save_stack_trace_tsk(current, trace);
+ unsigned long fp, sp, pc;
+
+ if (regs) {
+ fp = frame_pointer(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
+ } else if (task == NULL || task == current) {
+ const register unsigned long current_fp __asm__ ("r8");
+ fp = current_fp;
+ sp = current_stack_pointer;
+ pc = (unsigned long)walk_stackframe;
+ } else {
+ /* task blocked in __switch_to */
+ fp = thread_saved_fp(task);
+ sp = thread_saved_sp(task);
+ pc = thread_saved_lr(task);
+ }
+
+ for (;;) {
+ unsigned long low, high;
+ struct stackframe *frame;
+
+ if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+ break;
+
+ /* Validate frame pointer */
+ low = sp;
+ high = ALIGN(sp, THREAD_SIZE);
+ if (unlikely(fp < low || fp > high || fp & 0x3))
+ break;
+ /* Unwind stack frame */
+ frame = (struct stackframe *)fp;
+ sp = fp;
+ fp = frame->fp;
+ pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+ (unsigned long *)(fp - 8));
+ }
}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+#else /* !CONFIG_FRAME_POINTER */
+
+static void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
- unsigned long *fp, *stack_start, *stack_end;
- unsigned long addr;
- int skip = trace->skip;
- int savesched;
- int graph_idx = 0;
+ unsigned long sp, pc;
+ unsigned long *ksp;
- if (tsk == current) {
- asm volatile("mov %0, r8\n":"=r"(fp));
- savesched = 1;
+ if (regs) {
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
+ } else if (task == NULL || task == current) {
+ sp = current_stack_pointer;
+ pc = (unsigned long)walk_stackframe;
} else {
- fp = (unsigned long *)thread_saved_fp(tsk);
- savesched = 0;
+ /* task blocked in __switch_to */
+ sp = thread_saved_sp(task);
+ pc = thread_saved_lr(task);
}
- addr = (unsigned long) fp & THREAD_MASK;
- stack_start = (unsigned long *) addr;
- stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
- while (fp > stack_start && fp < stack_end) {
- unsigned long lpp, fpp;
+ if (unlikely(sp & 0x3))
+ return;
- fpp = fp[0];
- lpp = fp[1];
- if (!__kernel_text_address(lpp))
+ ksp = (unsigned long *)sp;
+ while (!kstack_end(ksp)) {
+ if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
break;
- else
- lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
-
- if (savesched || !in_sched_functions(lpp)) {
- if (skip) {
- skip--;
- } else {
- trace->entries[trace->nr_entries++] = lpp;
- if (trace->nr_entries >= trace->max_entries)
- break;
- }
- }
- fp = (unsigned long *)fpp;
+ pc = (*ksp++) - 0x4;
}
}
+#endif /* CONFIG_FRAME_POINTER */
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+ print_ip_sym((const char *)arg, pc);
+ return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ pr_cont("Call Trace:\n");
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
+}
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+ if (!in_sched_functions(pc)) {
+ unsigned long *p = arg;
+ *p = pc;
+ return true;
+ }
+ return false;
+}
+
+unsigned long __get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+
+ walk_stackframe(task, NULL, save_wchan, &pc);
+ return pc;
+}
+
+#ifdef CONFIG_STACKTRACE
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+ struct stack_trace *trace = arg;
+
+ if (unlikely(nosched && in_sched_functions(pc)))
+ return false;
+ if (unlikely(trace->skip > 0)) {
+ trace->skip--;
+ return false;
+ }
+
+ trace->entries[trace->nr_entries++] = pc;
+ return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+ return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ walk_stackframe(tsk, NULL, save_trace, trace);
+}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
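
The frame-pointer walker above validates fp on every step: it must stay
inside the THREAD_SIZE-aligned stack window and be 4-byte aligned before it
is dereferenced as a struct stackframe. A userspace sketch of the unwind
loop over a fabricated two-frame chain (bounds check simplified):

    #include <stdbool.h>
    #include <stdio.h>

    struct stackframe { unsigned long fp, ra; };

    /* Assumed stand-in for the kernel's stack-window check. */
    static bool fp_ok(unsigned long fp, unsigned long low, unsigned long high)
    {
        return fp >= low && fp <= high && !(fp & 0x3);
    }

    int main(void)
    {
        struct stackframe frames[2];
        unsigned long low = (unsigned long)&frames[0];
        unsigned long high = (unsigned long)&frames[2];
        unsigned long fp;

        frames[0].fp = (unsigned long)&frames[1]; frames[0].ra = 0x1111;
        frames[1].fp = 0;                         frames[1].ra = 0x2222;

        for (fp = (unsigned long)&frames[0]; fp_ok(fp, low, high);) {
            struct stackframe *frame = (struct stackframe *)fp;

            printf("ra = %#lx\n", frame->ra);
            fp = frame->fp; /* step to the caller's frame */
        }
        return 0;
    }
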
diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c
index b5fc9447d93f..52379d866fe4 100644
--- a/arch/csky/kernel/time.c
+++ b/arch/csky/kernel/time.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-#include <linux/clk-provider.h>
#include <linux/clocksource.h>
+#include <linux/of_clk.h>
void __init time_init(void)
{
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index b057480e7463..c2246b07cc9c 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
@@ -14,6 +15,9 @@
#include <linux/kallsyms.h>
#include <linux/rtc.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/sched/debug.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -26,6 +30,8 @@
#include <abi/fpu.h>
#endif
+int show_unhandled_signals = 1;
+
/* Defined in entry.S */
asmlinkage void csky_trap(void);
@@ -34,9 +40,7 @@ asmlinkage void csky_cmpxchg(void);
asmlinkage void csky_get_tls(void);
asmlinkage void csky_irq(void);
-asmlinkage void csky_tlbinvalidl(void);
-asmlinkage void csky_tlbinvalids(void);
-asmlinkage void csky_tlbmodified(void);
+asmlinkage void csky_pagefault(void);
/* Defined in head.S */
asmlinkage void _start_smp_secondary(void);
@@ -61,9 +65,9 @@ void __init trap_init(void)
VEC_INIT(VEC_TRAP3, csky_get_tls);
/* setup MMU TLB exception */
- VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl);
- VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids);
- VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified);
+ VEC_INIT(VEC_TLBINVALIDL, csky_pagefault);
+ VEC_INIT(VEC_TLBINVALIDS, csky_pagefault);
+ VEC_INIT(VEC_TLBMODIFIED, csky_pagefault);
#ifdef CONFIG_CPU_HAS_FPU
init_fpu();
@@ -76,93 +80,184 @@ void __init trap_init(void)
#endif
}
-void die_if_kernel(char *str, struct pt_regs *regs, int nr)
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
{
- if (user_mode(regs))
- return;
+ static int die_counter;
+ int ret;
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
console_verbose();
- pr_err("%s: %08x\n", str, nr);
+ bust_spinlocks(1);
+
+ pr_emerg("%s [#%d]\n", str, ++die_counter);
+ print_modules();
show_regs(regs);
+ show_stack(current, (unsigned long *)regs->regs[4], KERN_INFO);
+
+ ret = notify_die(DIE_OOPS, str, regs, 0, trap_no(regs), SIGSEGV);
+
+ bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- do_exit(SIGSEGV);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (ret != NOTIFY_STOP)
+ make_task_dead(SIGSEGV);
}
-void buserr(struct pt_regs *regs)
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
-#ifdef CONFIG_CPU_CK810
- static unsigned long prev_pc;
+ struct task_struct *tsk = current;
- if ((regs->pc == prev_pc) && prev_pc != 0) {
- prev_pc = 0;
+ if (show_unhandled_signals && unhandled_signal(tsk, signo)
+ && printk_ratelimit()) {
+ pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x%08lx",
+ tsk->comm, task_pid_nr(tsk), signo, code, addr);
+ print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
+ pr_cont("\n");
+ show_regs(regs);
+ }
+
+ force_sig_fault(signo, code, (void __user *)addr);
+}
+
+static void do_trap_error(struct pt_regs *regs, int signo, int code,
+ unsigned long addr, const char *str)
+{
+ current->thread.trap_no = trap_no(regs);
+
+ if (user_mode(regs)) {
+ do_trap(regs, signo, code, addr);
} else {
- prev_pc = regs->pc;
- return;
+ if (!fixup_exception(regs))
+ die(regs, str);
}
+}
+
+#define DO_ERROR_INFO(name, signo, code, str) \
+asmlinkage __visible void name(struct pt_regs *regs) \
+{ \
+ do_trap_error(regs, signo, code, regs->pc, "Oops - " str); \
+}
+
+DO_ERROR_INFO(do_trap_unknown,
+ SIGILL, ILL_ILLTRP, "unknown exception");
+DO_ERROR_INFO(do_trap_zdiv,
+ SIGFPE, FPE_INTDIV, "error zero div exception");
+DO_ERROR_INFO(do_trap_buserr,
+ SIGSEGV, ILL_ILLADR, "error bus error exception");
+
+asmlinkage void do_trap_misaligned(struct pt_regs *regs)
+{
+#ifdef CONFIG_CPU_NEED_SOFTALIGN
+ csky_alignment(regs);
+#else
+ current->thread.trap_no = trap_no(regs);
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->pc,
+ "Oops - load/store address misaligned");
#endif
+}
- die_if_kernel("Kernel mode BUS error", regs, 0);
+asmlinkage void do_trap_bkpt(struct pt_regs *regs)
+{
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs))
+ return;
+#endif
+#ifdef CONFIG_UPROBES
+ if (uprobe_single_step_handler(regs))
+ return;
+#endif
+ if (user_mode(regs)) {
+ send_sig(SIGTRAP, current, 0);
+ return;
+ }
- pr_err("User mode Bus Error\n");
- show_regs(regs);
+ do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->pc,
+ "Oops - illegal trap exception");
+}
- force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc);
+asmlinkage void do_trap_illinsn(struct pt_regs *regs)
+{
+ current->thread.trap_no = trap_no(regs);
+
+#ifdef CONFIG_KPROBES
+ if (kprobe_breakpoint_handler(regs))
+ return;
+#endif
+#ifdef CONFIG_UPROBES
+ if (uprobe_breakpoint_handler(regs))
+ return;
+#endif
+#ifndef CONFIG_CPU_NO_USER_BKPT
+ if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) {
+ send_sig(SIGTRAP, current, 0);
+ return;
+ }
+#endif
+
+ do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
+ "Oops - illegal instruction exception");
}
-#define USR_BKPT 0x1464
-asmlinkage void trap_c(struct pt_regs *regs)
+asmlinkage void do_trap_fpe(struct pt_regs *regs)
{
- int sig;
- unsigned long vector;
- siginfo_t info;
+#ifdef CONFIG_CPU_HAS_FPU
+ return fpu_fpe(regs);
+#else
+ do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
+ "Oops - fpu instruction exception");
+#endif
+}
- vector = (mfcr("psr") >> 16) & 0xff;
+asmlinkage void do_trap_priv(struct pt_regs *regs)
+{
+#ifdef CONFIG_CPU_HAS_FPU
+ if (user_mode(regs) && fpu_libc_helper(regs))
+ return;
+#endif
+ do_trap_error(regs, SIGILL, ILL_PRVOPC, regs->pc,
+ "Oops - illegal privileged exception");
+}
- switch (vector) {
+asmlinkage void trap_c(struct pt_regs *regs)
+{
+ switch (trap_no(regs)) {
case VEC_ZERODIV:
- die_if_kernel("Kernel mode ZERO DIV", regs, vector);
- sig = SIGFPE;
+ do_trap_zdiv(regs);
break;
- /* ptrace */
case VEC_TRACE:
- info.si_code = TRAP_TRACE;
- sig = SIGTRAP;
+ do_trap_bkpt(regs);
break;
case VEC_ILLEGAL:
- die_if_kernel("Kernel mode ILLEGAL", regs, vector);
-#ifndef CONFIG_CPU_NO_USER_BKPT
- if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)
-#endif
- {
- sig = SIGILL;
- break;
- }
- /* gdbserver breakpoint */
+ do_trap_illinsn(regs);
+ break;
case VEC_TRAP1:
- /* jtagserver breakpoint */
case VEC_BREAKPOINT:
- die_if_kernel("Kernel mode BKPT", regs, vector);
- info.si_code = TRAP_BRKPT;
- sig = SIGTRAP;
+ do_trap_bkpt(regs);
break;
case VEC_ACCESS:
- return buserr(regs);
-#ifdef CONFIG_CPU_NEED_SOFTALIGN
+ do_trap_buserr(regs);
+ break;
case VEC_ALIGN:
- return csky_alignment(regs);
-#endif
-#ifdef CONFIG_CPU_HAS_FPU
+ do_trap_misaligned(regs);
+ break;
case VEC_FPE:
- die_if_kernel("Kernel mode FPE", regs, vector);
- return fpu_fpe(regs);
+ do_trap_fpe(regs);
+ break;
case VEC_PRIV:
- die_if_kernel("Kernel mode PRIV", regs, vector);
- if (fpu_libc_helper(regs))
- return;
-#endif
+ do_trap_priv(regs);
+ break;
default:
- sig = SIGSEGV;
+ do_trap_unknown(regs);
break;
}
- send_sig(sig, current, 0);
}
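
DO_ERROR_INFO is a thin stamp: each invocation expands to a handler that
forwards its fixed signal, code and string to do_trap_error(), with the
"Oops - " prefix attached by string-literal concatenation. The same trick
on a toy handler, runnable in userspace:

    #include <stdio.h>

    /* Same stamping pattern as DO_ERROR_INFO, minus the kernel types. */
    #define DO_ERROR_INFO(name, signo, str)                  \
    static void name(void)                                   \
    {                                                        \
        printf("signal %d: %s\n", signo, "Oops - " str);     \
    }

    DO_ERROR_INFO(demo_zdiv, 8, "error zero div exception")

    int main(void)
    {
        demo_zdiv();
        return 0;
    }
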
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
index 60ff7adfad1d..2ca886e4a458 100644
--- a/arch/csky/kernel/vdso.c
+++ b/arch/csky/kernel/vdso.c
@@ -1,86 +1,97 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/init.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/unistd.h>
-#include <linux/uaccess.h>
-
-#include <asm/vdso.h>
-#include <asm/cacheflush.h>
-
-static struct page *vdso_page;
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
-static int __init init_vdso(void)
-{
- struct csky_vdso *vdso;
- int err = 0;
+#include <asm/page.h>
+#include <vdso/datapage.h>
- vdso_page = alloc_page(GFP_KERNEL);
- if (!vdso_page)
- panic("Cannot allocate vdso");
+extern char vdso_start[], vdso_end[];
- vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
- if (!vdso)
- panic("Cannot map vdso");
+static unsigned int vdso_pages;
+static struct page **vdso_pagelist;
- clear_page(vdso);
+static union vdso_data_store vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = vdso_data_store.data;
- err = setup_vdso_page(vdso->rt_signal_retcode);
- if (err)
- panic("Cannot set signal return code, err: %x.", err);
+static int __init vdso_init(void)
+{
+ unsigned int i;
+
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+ vdso_pagelist =
+ kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(vdso_pagelist == NULL)) {
+ pr_err("vdso: pagelist allocation failed\n");
+ return -ENOMEM;
+ }
- dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16);
+ for (i = 0; i < vdso_pages; i++) {
+ struct page *pg;
- vunmap(vdso);
+ pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
+ vdso_pagelist[i] = pg;
+ }
+ vdso_pagelist[i] = virt_to_page(vdso_data);
return 0;
}
-subsys_initcall(init_vdso);
+arch_initcall(vdso_init);
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
{
- int ret;
- unsigned long addr;
struct mm_struct *mm = current->mm;
+ unsigned long vdso_base, vdso_len;
+ int ret;
- down_write(&mm->mmap_sem);
+ vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
- addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
- if (IS_ERR_VALUE(addr)) {
- ret = addr;
- goto up_fail;
+ mmap_write_lock(mm);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = vdso_base;
+ goto end;
}
- ret = install_special_mapping(
- mm,
- addr,
- PAGE_SIZE,
- VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_page);
- if (ret)
- goto up_fail;
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ mm->context.vdso = (void *)vdso_base;
+
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
+ vdso_pagelist);
+
+ if (unlikely(ret)) {
+ mm->context.vdso = NULL;
+ goto end;
+ }
- mm->context.vdso = (void *)addr;
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
-up_fail:
- up_write(&mm->mmap_sem);
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
+end:
+ mmap_write_unlock(mm);
return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
- if (vma->vm_mm == NULL)
- return NULL;
-
- if (vma->vm_start == (long)vma->vm_mm->context.vdso)
+ if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
- else
- return NULL;
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
+ return NULL;
}
diff --git a/arch/csky/kernel/vdso/.gitignore b/arch/csky/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..3a19def868ec
--- /dev/null
+++ b/arch/csky/kernel/vdso/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso.lds
+*.tmp
+vdso-syms.S
diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile
new file mode 100644
index 000000000000..ddf784a62c11
--- /dev/null
+++ b/arch/csky/kernel/vdso/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile
+
+# Symbols present in the vdso
+vdso-syms += rt_sigreturn
+vdso-syms += vgettimeofday
+
+# Files to link into the vdso
+obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
+ccflags-y := -fno-stack-protector -DBUILD_VDSO32
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+obj-y += vdso.o vdso-syms.o
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
+
+# Force dependency
+$(obj)/vdso.o: $(obj)/vdso.so
+
+SYSCFLAGS_vdso.so.dbg = $(c_flags)
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both
+
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+ $(call if_changed,so2s)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Make sure only to export the intended __vdso_xxx symbol offsets.
+quiet_cmd_vdsold = VDSOLD $@
+ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
+ $(CROSS_COMPILE)objcopy \
+ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
+
+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S $@
+ cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
diff --git a/arch/csky/kernel/vdso/note.S b/arch/csky/kernel/vdso/note.S
new file mode 100644
index 000000000000..2a956c942211
--- /dev/null
+++ b/arch/csky/kernel/vdso/note.S
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/csky/kernel/vdso/rt_sigreturn.S b/arch/csky/kernel/vdso/rt_sigreturn.S
new file mode 100644
index 000000000000..0a6bd1216118
--- /dev/null
+++ b/arch/csky/kernel/vdso/rt_sigreturn.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <abi/vdso.h>
+
+ .text
+ENTRY(__vdso_rt_sigreturn)
+ .cfi_startproc
+ .cfi_signal_frame
+ SET_SYSCALL_ID
+ trap 0
+ .cfi_endproc
+ENDPROC(__vdso_rt_sigreturn)
diff --git a/arch/csky/kernel/vdso/so2s.sh b/arch/csky/kernel/vdso/so2s.sh
new file mode 100755
index 000000000000..69da3d529c6d
--- /dev/null
+++ b/arch/csky/kernel/vdso/so2s.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_5.10\)*!.global \2\n.set \2,0x\1!' \
+| grep '^\.'
diff --git a/arch/csky/kernel/vdso/vdso.S b/arch/csky/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..5162ca069494
--- /dev/null
+++ b/arch/csky/kernel/vdso/vdso.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/csky/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/csky/kernel/vdso/vdso.lds.S b/arch/csky/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..590a6c79fff7
--- /dev/null
+++ b/arch/csky/kernel/vdso/vdso.lds.S
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <asm/page.h>
+
+OUTPUT_ARCH(csky)
+
+SECTIONS
+{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+
+ . = 0x800;
+ .text : { *(.text .text.*) } :text
+
+ .data : {
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ }
+}
+
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+VERSION
+{
+ LINUX_5.10 {
+ global:
+ __vdso_rt_sigreturn;
+ __vdso_clock_gettime;
+ __vdso_clock_gettime64;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ local: *;
+ };
+}
diff --git a/arch/csky/kernel/vdso/vgettimeofday.c b/arch/csky/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..55af30e83752
--- /dev/null
+++ b/arch/csky/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <vdso/gettime.h>
+
+extern
+int __vdso_clock_gettime(clockid_t clock,
+ struct old_timespec32 *ts)
+{
+ return __cvdso_clock_gettime32(clock, ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32(clock_id, res);
+}
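
Userspace never calls these __vdso_* entry points directly: libc resolves
them from the "[vdso]" mapping and falls back to the trap-based syscall when
they are absent. A plain clock_gettime() call is enough to exercise the fast
path where the vDSO is mapped:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;

        /* Expected to route through the vDSO when one is available. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
            printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }
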
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index 2ff37beaf2bf..d718961786d2 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -2,6 +2,7 @@
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
+#include <asm/memory.h>
OUTPUT_ARCH(csky)
ENTRY(_start)
@@ -21,22 +22,18 @@ SECTIONS
{
. = PAGE_OFFSET + PHYS_OFFSET_OFFSET;
- _stext = .;
- __init_begin = .;
+ _start = .;
HEAD_TEXT_SECTION
- INIT_TEXT_SECTION(PAGE_SIZE)
- INIT_DATA_SECTION(PAGE_SIZE)
- PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
- __init_end = .;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ _stext = .;
+ VBR_BASE
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
@@ -46,20 +43,73 @@ SECTIONS
/* __init_begin __init_end must be page aligned for free_initmem */
. = ALIGN(PAGE_SIZE);
-
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
_sdata = .;
RO_DATA(PAGE_SIZE)
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
+#ifdef CONFIG_HAVE_TCM
+ .tcm_start : {
+ . = ALIGN(PAGE_SIZE);
+ __tcm_start = .;
+ }
+
+ .text_data_tcm FIXADDR_TCM : AT(__tcm_start)
+ {
+ . = ALIGN(4);
+ __stcm_text_data = .;
+ *(.tcm.text)
+ *(.tcm.rodata)
+#ifndef CONFIG_HAVE_DTCM
+ *(.tcm.data)
+#endif
+ . = ALIGN(4);
+ __etcm_text_data = .;
+ }
+
+ . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm);
+
+#ifdef CONFIG_HAVE_DTCM
+ #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE
+
+ .dtcm_start : {
+ __dtcm_start = .;
+ }
+
+ .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start)
+ {
+ . = ALIGN(4);
+ __stcm_data = .;
+ *(.tcm.data)
+ . = ALIGN(4);
+ __etcm_data = .;
+ }
+
+ . = ADDR(.dtcm_start) + SIZEOF(.data_tcm);
+
+ .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) {
+#else
+ .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) {
+#endif
+ . = ALIGN(PAGE_SIZE);
+ __tcm_end = .;
+ }
+#endif
+
EXCEPTION_TABLE(L1_CACHE_BYTES)
BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
- VBR_BASE
_end = . ;
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}