Diffstat (limited to 'arch/mips/kernel/process.c')
| -rw-r--r-- | arch/mips/kernel/process.c | 76 |
1 file changed, 39 insertions, 37 deletions
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 73c8e7990a97..a3101f2268c6 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -40,7 +40,7 @@
 #include <asm/stacktrace.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
 {
 	play_dead();
 }
@@ -105,10 +105,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-		unsigned long kthread_arg, struct task_struct *p,
-		unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+	u64 clone_flags = args->flags;
+	unsigned long usp = args->stack;
+	unsigned long tls = args->tls;
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
@@ -120,15 +121,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	/* Put the stack after the struct pt_regs.  */
 	childksp = (unsigned long) childregs;
 	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+
+	/*
+	 * New tasks lose permission to use the fpu. This accelerates context
+	 * switching for most programs since they don't use the fpu.
+	 */
+	clear_tsk_thread_flag(p, TIF_USEDFPU);
+	clear_tsk_thread_flag(p, TIF_USEDMSA);
+	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	clear_tsk_thread_flag(p, TIF_FPUBOUND);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	if (unlikely(args->fn)) {
 		/* kernel thread */
 		unsigned long status = p->thread.cp0_status;
 		memset(childregs, 0, sizeof(struct pt_regs));
-		p->thread.reg16 = usp; /* fn */
-		p->thread.reg17 = kthread_arg;
+		p->thread.reg16 = (unsigned long)args->fn;
+		p->thread.reg17 = (unsigned long)args->fn_arg;
 		p->thread.reg29 = childksp;
 		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3000)
 		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
 			 ((status & (ST0_KUC | ST0_IEC)) << 2);
 #else
@@ -148,20 +162,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.reg29 = (unsigned long) childregs;
 	p->thread.reg31 = (unsigned long) ret_from_fork;
 
-	/*
-	 * New tasks lose permission to use the fpu. This accelerates context
-	 * switching for most programs since they don't use the fpu.
-	 */
 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
 
-	clear_tsk_thread_flag(p, TIF_USEDFPU);
-	clear_tsk_thread_flag(p, TIF_USEDMSA);
-	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-	clear_tsk_thread_flag(p, TIF_FPUBOUND);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
 #ifdef CONFIG_MIPS_FP_SUPPORT
 	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
 #endif
@@ -511,7 +513,7 @@ static int __init frame_info_init(void)
 
 	/*
 	 * Without schedule() frame info, result given by
-	 * thread_saved_pc() and get_wchan() are not reliable.
+	 * thread_saved_pc() and __get_wchan() are not reliable.
 	 */
 	if (schedule_mfi.pc_offset < 0)
 		printk("Can't analyze schedule() prologue at %p\n", schedule);
@@ -652,9 +654,9 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 #endif
 
 /*
- * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
+ * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
  */
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 #ifdef CONFIG_KALLSYMS
@@ -662,8 +664,6 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ra = 0;
 #endif
 
-	if (!task || task == current || task_is_running(task))
-		goto out;
 	if (!task_stack_page(task))
 		goto out;
 
@@ -690,18 +690,20 @@ unsigned long mips_stack_top(void)
 	}
 
 	/* Space for the VDSO, data page & GIC user page */
-	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
-	top -= PAGE_SIZE;
-	top -= mips_gic_present() ? PAGE_SIZE : 0;
+	if (current->thread.abi) {
+		top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+		top -= VDSO_NR_PAGES * PAGE_SIZE;
+		top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+		/* Space to randomize the VDSO base */
+		if (current->flags & PF_RANDOMIZE)
+			top -= VDSO_RANDOMIZE_SIZE;
+	}
 
 	/* Space for cache colour alignment */
 	if (cpu_has_dc_aliases)
 		top -= shm_align_mask + 1;
 
-	/* Space to randomize the VDSO base */
-	if (current->flags & PF_RANDOMIZE)
-		top -= VDSO_RANDOMIZE_SIZE;
-
 	return top;
 }
 
@@ -712,7 +714,7 @@ unsigned long mips_stack_top(void)
 unsigned long arch_align_stack(unsigned long sp)
 {
 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-		sp -= get_random_int() & ~PAGE_MASK;
+		sp -= get_random_u32_below(PAGE_SIZE);
 
 	return sp & ALMASK;
 }
@@ -751,9 +753,9 @@ static void raise_backtrace(cpumask_t *mask)
 	}
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
 {
-	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
@@ -859,10 +861,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	 * scheduled in then it will already have picked up the new FP mode
 	 * whilst doing so.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
 		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return 0;
 }
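The central change above is that copy_thread() now receives a single `struct kernel_clone_args` and treats a non-NULL `args->fn` as the marker for a kernel thread. As a rough illustration only, the following is a minimal user-space sketch of that dispatch pattern; `struct clone_args_sketch`, `copy_thread_sketch()` and the demo values are invented for this example and are not kernel code or part of the patch.

```c
/*
 * Stand-alone sketch (assumption: a cut-down stand-in for kernel_clone_args).
 * A non-NULL fn marks a kernel thread, whose fn/fn_arg would be stashed for
 * ret_from_kernel_thread; a NULL fn means a user fork/clone, which would
 * copy the parent's registers and honour the requested stack/tls.
 */
#include <stdio.h>

struct clone_args_sketch {
	unsigned long flags;
	unsigned long stack;	/* new user stack pointer, 0 = inherit parent's */
	unsigned long tls;
	int (*fn)(void *);	/* non-NULL => kernel thread */
	void *fn_arg;
};

static int kthread_fn(void *arg)
{
	printf("kernel-thread path, arg=%s\n", (const char *)arg);
	return 0;
}

static void copy_thread_sketch(const struct clone_args_sketch *args)
{
	if (args->fn) {
		/* kernel thread: no user register context to copy */
		args->fn(args->fn_arg);
	} else {
		/* user fork/clone: would duplicate pt_regs, apply stack/tls */
		printf("user-fork path, stack=%#lx tls=%#lx\n",
		       args->stack, args->tls);
	}
}

int main(void)
{
	struct clone_args_sketch kt = { .fn = kthread_fn, .fn_arg = "demo" };
	struct clone_args_sketch uf = { .stack = 0x7fff0000, .tls = 0x1000 };

	copy_thread_sketch(&kt);
	copy_thread_sketch(&uf);
	return 0;
}
```

Note that in the patch itself the FPU/MSA flag clearing is also hoisted above this branch, so both the kernel-thread and user-fork paths start with the FPU disabled for the child.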
