Diffstat (limited to 'arch/loongarch/kernel/process.c')
| -rw-r--r-- | arch/loongarch/kernel/process.c | 66 |
1 file changed, 50 insertions(+), 16 deletions(-)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index ba457e43f5be..efd9edf65603 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -13,6 +13,7 @@
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/entry-common.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
@@ -34,10 +35,13 @@
 #include <linux/nmi.h>
 
 #include <asm/asm.h>
+#include <asm/asm-prototypes.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/elf.h>
+#include <asm/exec.h>
 #include <asm/fpu.h>
+#include <asm/lbt.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
@@ -45,6 +49,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/reg.h>
+#include <asm/switch_to.h>
 #include <asm/unwind.h>
 #include <asm/vdso.h>
 
@@ -61,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard);
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
+asmlinkage void restore_and_ret(void);
+asmlinkage void ret_from_fork_asm(void);
+asmlinkage void ret_from_kernel_thread_asm(void);
 
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
@@ -82,9 +88,12 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 	euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
 	regs->csr_euen = euen;
 	lose_fpu(0);
+	lose_lbt(0);
+	current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
 
 	clear_thread_flag(TIF_LSX_CTX_LIVE);
 	clear_thread_flag(TIF_LASX_CTX_LIVE);
+	clear_thread_flag(TIF_LBT_CTX_LIVE);
 	clear_used_math();
 	regs->csr_era = pc;
 	regs->regs[3] = sp;
@@ -121,14 +130,35 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 	preempt_enable();
 
-	if (used_math())
-		memcpy(dst, src, sizeof(struct task_struct));
-	else
+	if (!used_math())
 		memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
+	else
+		memcpy(dst, src, offsetof(struct task_struct, thread.lbt.scr0));
+
+#ifdef CONFIG_CPU_HAS_LBT
+	memcpy(&dst->thread.lbt, &src->thread.lbt, sizeof(struct loongarch_lbt));
+#endif
 
 	return 0;
 }
 
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+							    struct pt_regs *regs)
+{
+	schedule_tail(prev);
+	syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+								     struct pt_regs *regs,
+								     int (*fn)(void *),
+								     void *fn_arg)
+{
+	schedule_tail(prev);
+	fn(fn_arg);
+	syscall_exit_to_user_mode(regs);
+}
+
 /*
  * Copy architecture-specific thread state
  */
@@ -137,7 +167,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	unsigned long childksp;
 	unsigned long tls = args->tls;
 	unsigned long usp = args->stack;
-	unsigned long clone_flags = args->flags;
+	u64 clone_flags = args->flags;
 	struct pt_regs *childregs, *regs = current_pt_regs();
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
@@ -156,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		p->thread.reg03 = childksp;
 		p->thread.reg23 = (unsigned long)args->fn;
 		p->thread.reg24 = (unsigned long)args->fn_arg;
-		p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
-		p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+		p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm;
+		p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm;
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->csr_euen = p->thread.csr_euen;
 		childregs->csr_crmd = p->thread.csr_crmd;
@@ -173,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		childregs->regs[3] = usp;
 
 	p->thread.reg03 = (unsigned long) childregs;
-	p->thread.reg01 = (unsigned long) ret_from_fork;
-	p->thread.sched_ra = (unsigned long) ret_from_fork;
+	p->thread.reg01 = (unsigned long) ret_from_fork_asm;
+	p->thread.sched_ra = (unsigned long) ret_from_fork_asm;
 
 	/*
 	 * New tasks lose permission to use the fpu. This accelerates context
@@ -189,8 +219,10 @@ out:
 	ptrace_hw_copy_thread(p);
 	clear_tsk_thread_flag(p, TIF_USEDFPU);
 	clear_tsk_thread_flag(p, TIF_USEDSIMD);
+	clear_tsk_thread_flag(p, TIF_USEDLBT);
 	clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
 	clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+	clear_tsk_thread_flag(p, TIF_LBT_CTX_LIVE);
 
 	return 0;
 }
@@ -282,13 +314,15 @@ unsigned long stack_top(void)
 {
 	unsigned long top = TASK_SIZE & PAGE_MASK;
 
-	/* Space for the VDSO & data page */
-	top -= PAGE_ALIGN(current->thread.vdso->size);
-	top -= VVAR_SIZE;
+	if (current->thread.vdso) {
+		/* Space for the VDSO & data page */
+		top -= PAGE_ALIGN(current->thread.vdso->size);
+		top -= VVAR_SIZE;
 
-	/* Space to randomize the VDSO base */
-	if (current->flags & PF_RANDOMIZE)
-		top -= VDSO_RANDOMIZE_SIZE;
+		/* Space to randomize the VDSO base */
+		if (current->flags & PF_RANDOMIZE)
+			top -= VDSO_RANDOMIZE_SIZE;
+	}
 
 	return top;
 }
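
Note on the arch_dup_task_struct() hunk above: it uses offsetof() with a nested member designator to copy the source task only up to a chosen field, and then copies the LBT block separately. The stand-alone sketch below (ordinary user-space C with made-up stand-in types such as struct task, struct fpu_state and struct lbt_state; not kernel code) illustrates that partial-copy pattern:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Illustrative only: stand-in types, not the kernel's task_struct. */
struct fpu_state { double fpr[4]; };            /* stand-in for thread.fpu */
struct lbt_state { unsigned long scr0, scr1; }; /* stand-in for thread.lbt */

struct task {
	int pid;
	struct fpu_state fpu;
	struct lbt_state lbt;
};

/* Copy src into dst up to a point chosen with offsetof(), as in the diff. */
static void dup_task(struct task *dst, const struct task *src, int used_math)
{
	if (!used_math)
		memcpy(dst, src, offsetof(struct task, fpu.fpr));  /* stop before the FPU block */
	else
		memcpy(dst, src, offsetof(struct task, lbt.scr0)); /* copy FPU state, stop before LBT */

	/* The LBT block is then copied explicitly, mirroring the CONFIG_CPU_HAS_LBT memcpy. */
	memcpy(&dst->lbt, &src->lbt, sizeof(struct lbt_state));
}

int main(void)
{
	struct task src = { .pid = 42, .fpu = { { 1.5, 0, 0, 0 } }, .lbt = { 7, 8 } };
	struct task dst = { 0 };

	dup_task(&dst, &src, 1);
	printf("pid=%d fpr[0]=%g scr0=%lu\n", dst.pid, dst.fpu.fpr[0], dst.lbt.scr0);
	return 0;
}

The nested designator (fpu.fpr, lbt.scr0) is valid in offsetof() and expands to a compile-time byte offset, which is why the kernel can express "copy everything up to thread.lbt.scr0" as a single memcpy length.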
