Diffstat (limited to 'arch/mips/kernel')
54 files changed, 773 insertions, 550 deletions
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index d1b11f66f748..b910ec54a3a1 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -27,6 +27,12 @@ void output_ptreg_defines(void); void output_ptreg_defines(void) { COMMENT("MIPS pt_regs offsets."); +#ifdef CONFIG_32BIT + OFFSET(PT_ARG4, pt_regs, args[4]); + OFFSET(PT_ARG5, pt_regs, args[5]); + OFFSET(PT_ARG6, pt_regs, args[6]); + OFFSET(PT_ARG7, pt_regs, args[7]); +#endif OFFSET(PT_R0, pt_regs, regs[0]); OFFSET(PT_R1, pt_regs, regs[1]); OFFSET(PT_R2, pt_regs, regs[2]); @@ -101,6 +107,7 @@ void output_thread_info_defines(void) OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_REGS, thread_info, regs); + OFFSET(TI_SYSCALL, thread_info, syscall); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index d39a2963b451..2a14dc4ee57e 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -103,7 +103,7 @@ void sb1480_clockevent_init(void) BUG_ON(cpu > 3); /* Only have 4 general purpose timers */ - sprintf(name, "bcm1480-counter-%d", cpu); + sprintf(name, "bcm1480-counter-%u", cpu); cd->name = name; cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 368e8475870f..5f6e9e2ebbdb 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -303,13 +303,6 @@ int r4k_clockevent_init(void) if (!c0_compare_int_usable()) return -ENXIO; - /* - * With vectored interrupts things are getting platform specific. - * get_c0_compare_int is a hook to allow a platform to return the - * interrupt number of its liking. - */ - irq = get_c0_compare_int(); - cd = &per_cpu(mips_clockevent_device, cpu); cd->name = "MIPS"; @@ -320,7 +313,6 @@ int r4k_clockevent_init(void) min_delta = calculate_min_delta(); cd->rating = 300; - cd->irq = irq; cd->cpumask = cpumask_of(cpu); cd->set_next_event = mips_next_event; cd->event_handler = mips_event_handler; @@ -332,6 +324,13 @@ int r4k_clockevent_init(void) cp0_timer_irq_installed = 1; + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of its liking. + */ + irq = get_c0_compare_int(); + if (request_irq(irq, c0_compare_interrupt, flags, "timer", c0_compare_interrupt)) pr_err("Failed to request irq %d (timer)\n", irq); diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index e974a4954df8..c371def2302d 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -102,3 +102,4 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, return old; } } +EXPORT_SYMBOL(__cmpxchg_small); diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 64ecfdac6580..f876309130ad 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -4,6 +4,7 @@ * Author: Paul Burton <paul.burton@mips.com> */ +#include <linux/init.h> #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asm-offsets.h> @@ -82,39 +83,10 @@ .endm -.balign 0x1000 - -LEAF(mips_cps_core_entry) - /* - * These first several instructions will be patched by cps_smp_setup to load the - * CCA to use into register s0 and GCR base address to register s1. 
- */ - .rept CPS_ENTRY_PATCH_INSNS - nop - .endr - - .global mips_cps_core_entry_patch_end -mips_cps_core_entry_patch_end: - - /* Check whether we're here due to an NMI */ - mfc0 k0, CP0_STATUS - and k0, k0, ST0_NMI - beqz k0, not_nmi - nop - - /* This is an NMI */ - PTR_LA k0, nmi_handler - jr k0 - nop - -not_nmi: - /* Setup Cause */ - li t0, CAUSEF_IV - mtc0 t0, CP0_CAUSE - - /* Setup Status */ - li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS - mtc0 t0, CP0_STATUS +LEAF(mips_cps_core_boot) + /* Save CCA and GCR base */ + move s0, a0 + move s1, a1 /* We don't know how to do coherence setup on earlier ISA */ #if MIPS_ISA_REV > 0 @@ -178,49 +150,45 @@ not_nmi: PTR_L sp, VPEBOOTCFG_SP(v1) jr t1 nop - END(mips_cps_core_entry) + END(mips_cps_core_boot) -.org 0x200 + __INIT LEAF(excep_tlbfill) DUMP_EXCEP("TLB Fill") b . nop END(excep_tlbfill) -.org 0x280 LEAF(excep_xtlbfill) DUMP_EXCEP("XTLB Fill") b . nop END(excep_xtlbfill) -.org 0x300 LEAF(excep_cache) DUMP_EXCEP("Cache") b . nop END(excep_cache) -.org 0x380 LEAF(excep_genex) DUMP_EXCEP("General") b . nop END(excep_genex) -.org 0x400 LEAF(excep_intex) DUMP_EXCEP("Interrupt") b . nop END(excep_intex) -.org 0x480 LEAF(excep_ejtag) PTR_LA k0, ejtag_debug_handler jr k0 nop END(excep_ejtag) + __FINIT LEAF(mips_cps_core_init) #ifdef CONFIG_MIPS_MT_SMP @@ -428,7 +396,7 @@ LEAF(mips_cps_boot_vpes) /* Calculate a pointer to the VPEs struct vpe_boot_config */ li t0, VPEBOOTCFG_SIZE mul t0, t0, ta1 - addu t0, t0, ta3 + PTR_ADDU t0, t0, ta3 /* Set the TC restart PC */ lw t1, VPEBOOTCFG_PC(t0) @@ -603,10 +571,10 @@ dcache_done: lw $1, TI_CPU(gp) sll $1, $1, LONGLOG PTR_LA \dest, __per_cpu_offset - addu $1, $1, \dest + PTR_ADDU $1, $1, \dest lw $1, 0($1) PTR_LA \dest, cps_cpu_state - addu \dest, \dest, $1 + PTR_ADDU \dest, \dest, $1 .set pop .endm diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index b406d8bfb15a..af7412549e6e 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -179,7 +179,6 @@ void __init check_bugs32(void) static inline int cpu_has_confreg(void) { #ifdef CONFIG_CPU_R3000 - extern unsigned long r3k_cache_size(unsigned long); unsigned long size1, size2; unsigned long cfg = read_c0_conf(); @@ -1139,7 +1138,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) * This processor doesn't have an MMU, so it's not * "real easy" to run Linux on it. It is left purely * for documentation. Commented out because it shares - * it's c0_prid id number with the TX3900. + * its c0_prid id number with the TX3900. 
*/ c->cputype = CPU_R4650; __cpu_name[cpu] = "R4650"; @@ -1725,12 +1724,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, + LOONGSON_CONF6_INTIMER); break; case PRID_IMP_LOONGSON_64G: __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); decode_cpucfg(c); + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, + LOONGSON_CONF6_INTIMER); break; default: panic("Unknown Loongson Processor ID!"); diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c index be93469c0e0e..0c826f729f75 100644 --- a/arch/mips/kernel/cpu-r3k-probe.c +++ b/arch/mips/kernel/cpu-r3k-probe.c @@ -42,7 +42,6 @@ void __init check_bugs32(void) static inline int cpu_has_confreg(void) { #ifdef CONFIG_CPU_R3000 - extern unsigned long r3k_cache_size(unsigned long); unsigned long size1, size2; unsigned long cfg = read_c0_conf(); diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index edc4afc080fa..59eca397f297 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -66,6 +66,18 @@ static bool rdhwr_count_usable(void) return false; } +static inline __init bool count_can_be_sched_clock(void) +{ + if (IS_ENABLED(CONFIG_CPU_FREQ)) + return false; + + if (num_possible_cpus() > 1 && + !IS_ENABLED(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK)) + return false; + + return true; +} + #ifdef CONFIG_CPU_FREQ static bool __read_mostly r4k_clock_unstable; @@ -111,7 +123,8 @@ int __init init_r4k_clocksource(void) return -ENXIO; /* Calculate a somewhat reasonable rating value */ - clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; + clocksource_mips.rating = 200; + clocksource_mips.rating += clamp(mips_hpt_frequency / 10000000, 0, 99); /* * R2 onwards makes the count accessible to user mode so it can be used @@ -122,9 +135,8 @@ int __init init_r4k_clocksource(void) clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); -#ifndef CONFIG_CPU_FREQ - sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); -#endif + if (count_can_be_sched_clock()) + sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); return 0; } diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 5582a4ca1e9e..f0e7fe85a42a 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -11,6 +11,7 @@ #include <asm/cpu-features.h> #include <asm/cpu-info.h> +#include <asm/fpu.h> #ifdef CONFIG_MIPS_FP_SUPPORT @@ -309,9 +310,18 @@ void mips_set_personality_nan(struct arch_elf_state *state) struct cpuinfo_mips *c = &boot_cpu_data; struct task_struct *t = current; + /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case + * we are preempted before the lose_fpu(0) in start_thread. 
+ */ + lose_fpu(0); + t->thread.fpu.fcr31 = c->fpu_csr31; switch (state->nan_2008) { case 0: + if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) + t->thread.fpu.fcr31 &= ~FPU_CSR_NAN2008; + if (!(c->fpu_msk31 & FPU_CSR_ABS2008)) + t->thread.fpu.fcr31 &= ~FPU_CSR_ABS2008; break; case 1: if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) diff --git a/arch/mips/kernel/fpu-probe.c b/arch/mips/kernel/fpu-probe.c index e689d6a83234..6bf3f19b1c33 100644 --- a/arch/mips/kernel/fpu-probe.c +++ b/arch/mips/kernel/fpu-probe.c @@ -144,7 +144,7 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c) * IEEE 754 conformance mode to use. Affects the NaN encoding and the * ABS.fmt/NEG.fmt execution mode. */ -static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT; +static enum { STRICT, EMULATED, LEGACY, STD2008, RELAXED } ieee754 = STRICT; /* * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes @@ -160,6 +160,7 @@ static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) switch (ieee754) { case STRICT: + case EMULATED: if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | @@ -204,6 +205,10 @@ static void cpu_set_nan_2008(struct cpuinfo_mips *c) mips_use_nan_legacy = !cpu_has_nan_2008; mips_use_nan_2008 = !!cpu_has_nan_2008; break; + case EMULATED: + /* Pretend ABS2008/NAN2008 options are dynamic */ + c->fpu_msk31 &= ~(FPU_CSR_NAN2008 | FPU_CSR_ABS2008); + fallthrough; case RELAXED: mips_use_nan_legacy = true; mips_use_nan_2008 = true; @@ -226,6 +231,8 @@ static int __init ieee754_setup(char *s) return -1; else if (!strcmp(s, "strict")) ieee754 = STRICT; + else if (!strcmp(s, "emulated")) + ieee754 = EMULATED; else if (!strcmp(s, "legacy")) ieee754 = LEGACY; else if (!strcmp(s, "2008")) diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 8c401e42301c..f39e85fd58fa 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long +static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { unsigned long sp, ip, tmp; diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index b6de8e88c1bd..a572ce36a24f 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -272,18 +272,17 @@ NESTED(except_vec_vi, 0, sp) .set push .set noreorder PTR_LA v1, except_vec_vi_handler -FEXPORT(except_vec_vi_lui) - lui v0, 0 /* Patched */ jr v1 FEXPORT(except_vec_vi_ori) - ori v0, 0 /* Patched */ + ori v0, zero, 0 /* Offset in vi_handlers[] */ .set pop END(except_vec_vi) EXPORT(except_vec_vi_end) /* * Common Vectored Interrupt code - * Complete the register saves and invoke the handler which is passed in $v0 + * Complete the register saves and invoke the handler, $v0 holds + * offset into vi_handlers[] */ NESTED(except_vec_vi_handler, 0, sp) SAVE_TEMP @@ -331,6 +330,7 @@ NESTED(except_vec_vi_handler, 0, sp) /* Save task's sp on IRQ stack so that unwinding can follow it */ LONG_S s1, 0(sp) 2: + PTR_L v0, vi_handlers(v0) jalr v0 /* Restore sp */ diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index b825ed4476c7..d99ed58b7043 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -59,6 +59,8 @@ #endif .endm + 
__HEAD + #ifndef CONFIG_NO_EXCEPT_FILL /* * Reserved space for exception handlers. diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 316b27d0d2fb..dc39f5b3fb83 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -55,7 +55,7 @@ NOKPROBE_SYMBOL(insn_has_delayslot); * one; putting breakpoint on top of atomic ll/sc pair is bad idea; * so we need to prevent it and refuse kprobes insertion for such * instructions; cannot do much about breakpoint in the middle of - * ll/sc pair; it is upto user to avoid those places + * ll/sc pair; it is up to user to avoid those places */ static int insn_has_ll_or_sc(union mips_instruction insn) { diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 6b61be486303..a0c0a7a654e9 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -42,6 +42,7 @@ #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/mman.h> +#include <asm/syscalls.h> #ifdef __MIPSEB__ #define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL)) diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 432bfd3e7f22..4e3579bbd620 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -8,6 +8,7 @@ #include <linux/mm.h> #include <linux/delay.h> #include <linux/libfdt.h> +#include <linux/reboot.h> #include <asm/cacheflush.h> #include <asm/page.h> diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 3f00788b0871..3eb2cfb893e1 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -179,7 +179,7 @@ static char *cm3_causes[32] = { static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock); static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags); -phys_addr_t __mips_cm_phys_base(void) +phys_addr_t __weak mips_cm_phys_base(void) { unsigned long cmgcr; @@ -198,10 +198,7 @@ phys_addr_t __mips_cm_phys_base(void) return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); } -phys_addr_t mips_cm_phys_base(void) - __attribute__((weak, alias("__mips_cm_phys_base"))); - -phys_addr_t __mips_cm_l2sync_phys_base(void) +phys_addr_t __weak mips_cm_l2sync_phys_base(void) { u32 base_reg; @@ -217,9 +214,6 @@ phys_addr_t __mips_cm_l2sync_phys_base(void) return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; } -phys_addr_t mips_cm_l2sync_phys_base(void) - __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); - static void mips_cm_probe_l2sync(void) { unsigned major_rev; @@ -518,3 +512,40 @@ void mips_cm_error_report(void) /* reprime cause register */ write_gcr_error_cause(cm_error); } + +unsigned int mips_cps_first_online_in_cluster(void) +{ + unsigned int local_cl; + int i; + + local_cl = cpu_cluster(¤t_cpu_data); + + /* + * We rely upon knowledge that CPUs are numbered sequentially by + * cluster - ie. CPUs 0..X will be in cluster 0, CPUs X+1..Y in cluster + * 1, CPUs Y+1..Z in cluster 2 etc. This means that CPUs in the same + * cluster will immediately precede or follow one another. + * + * First we scan backwards, until we find an online CPU in the cluster + * or we move on to another cluster. 
+ */ + for (i = smp_processor_id() - 1; i >= 0; i--) { + if (cpu_cluster(&cpu_data[i]) != local_cl) + break; + if (!cpu_online(i)) + continue; + return false; + } + + /* Then do the same for higher numbered CPUs */ + for (i = smp_processor_id() + 1; i < nr_cpu_ids; i++) { + if (cpu_cluster(&cpu_data[i]) != local_cl) + break; + if (!cpu_online(i)) + continue; + return false; + } + + /* We found no online CPUs in the local cluster */ + return true; +} diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 67e130d3f038..10172fc4f627 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -15,6 +15,7 @@ #include <linux/security.h> #include <linux/types.h> #include <linux/uaccess.h> +#include <asm/syscalls.h> /* * CPU mask used to set process affinity for MT VPEs/TCs with FPUs diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index f88b7919f11f..2ef610650a9e 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -19,6 +19,7 @@ #include <asm/mipsmtregs.h> #include <asm/r4kcache.h> #include <asm/cacheflush.h> +#include <asm/mips_mt.h> int vpelimit; @@ -42,83 +43,6 @@ static int __init maxtcs(char *str) __setup("maxtcs=", maxtcs); -/* - * Dump new MIPS MT state for the core. Does not leave TCs halted. - * Takes an argument which taken to be a pre-call MVPControl value. - */ - -void mips_mt_regdump(unsigned long mvpctl) -{ - unsigned long flags; - unsigned long vpflags; - unsigned long mvpconf0; - int nvpe; - int ntc; - int i; - int tc; - unsigned long haltval; - unsigned long tcstatval; - - local_irq_save(flags); - vpflags = dvpe(); - printk("=== MIPS MT State Dump ===\n"); - printk("-- Global State --\n"); - printk(" MVPControl Passed: %08lx\n", mvpctl); - printk(" MVPControl Read: %08lx\n", vpflags); - printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); - nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - printk("-- per-VPE State --\n"); - for (i = 0; i < nvpe; i++) { - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { - printk(" VPE %d\n", i); - printk(" VPEControl : %08lx\n", - read_vpe_c0_vpecontrol()); - printk(" VPEConf0 : %08lx\n", - read_vpe_c0_vpeconf0()); - printk(" VPE%d.Status : %08lx\n", - i, read_vpe_c0_status()); - printk(" VPE%d.EPC : %08lx %pS\n", - i, read_vpe_c0_epc(), - (void *) read_vpe_c0_epc()); - printk(" VPE%d.Cause : %08lx\n", - i, read_vpe_c0_cause()); - printk(" VPE%d.Config7 : %08lx\n", - i, read_vpe_c0_config7()); - break; /* Next VPE */ - } - } - } - printk("-- per-TC State --\n"); - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if (read_tc_c0_tcbind() == read_c0_tcbind()) { - /* Are we dumping ourself? 
*/ - haltval = 0; /* Then we're not halted, and mustn't be */ - tcstatval = flags; /* And pre-dump TCStatus is flags */ - printk(" TC %d (current TC with VPE EPC above)\n", tc); - } else { - haltval = read_tc_c0_tchalt(); - write_tc_c0_tchalt(1); - tcstatval = read_tc_c0_tcstatus(); - printk(" TC %d\n", tc); - } - printk(" TCStatus : %08lx\n", tcstatval); - printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); - printk(" TCRestart : %08lx %pS\n", - read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart()); - printk(" TCHalt : %08lx\n", haltval); - printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); - if (!haltval) - write_tc_c0_tchalt(0); - } - printk("===========================\n"); - evpe(vpflags); - local_irq_restore(flags); -} - static int mt_opt_rpsctl = -1; static int mt_opt_nblsu = -1; static int mt_opt_forceconfig7; @@ -198,9 +122,8 @@ void mips_mt_set_cpuoptions(void) unsigned long ectlval; unsigned long itcblkgrn; - /* ErrCtl register is known as "ecc" to Linux */ - ectlval = read_c0_ecc(); - write_c0_ecc(ectlval | (0x1 << 26)); + ectlval = read_c0_errctl(); + write_c0_errctl(ectlval | (0x1 << 26)); ehb(); #define INDEX_0 (0x80000000) #define INDEX_8 (0x80000008) @@ -221,26 +144,20 @@ void mips_mt_set_cpuoptions(void) ehb(); /* Write out to ITU with CACHE op */ cache_op(Index_Store_Tag_D, INDEX_0); - write_c0_ecc(ectlval); + write_c0_errctl(ectlval); ehb(); printk("Mapped %ld ITC cells starting at 0x%08x\n", ((itcblkgrn & 0x7fe00000) >> 20), itc_base); } } -struct class *mt_class; +const struct class mt_class = { + .name = "mt", +}; static int __init mips_mt_init(void) { - struct class *mtc; - - mtc = class_create("mt"); - if (IS_ERR(mtc)) - return PTR_ERR(mtc); - - mt_class = mtc; - - return 0; + return class_register(&mt_class); } subsys_initcall(mips_mt_init); diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 0c936cbf20c5..ba0f62d8eff5 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -13,15 +13,13 @@ #include <linux/elf.h> #include <linux/mm.h> #include <linux/numa.h> -#include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/jump_label.h> - -extern void jump_label_apply_nops(struct module *mod); +#include <asm/jump_label.h> struct mips_hi16 { struct mips_hi16 *next; @@ -32,15 +30,6 @@ struct mips_hi16 { static LIST_HEAD(dbe_list); static DEFINE_SPINLOCK(dbe_lock); -#ifdef MODULE_START -void *module_alloc(unsigned long size) -{ - return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, - GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} -#endif - static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v) { *location = base + v; diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 9bf60d7d44d3..d09ca77e624d 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -18,6 +18,7 @@ #include <asm/mipsmtregs.h> #include <asm/pm.h> #include <asm/pm-cps.h> +#include <asm/regdef.h> #include <asm/smp-cps.h> #include <asm/uasm.h> @@ -69,13 +70,6 @@ DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); static struct uasm_label labels[32]; static struct uasm_reloc relocs[32]; -enum mips_reg { - zero, at, v0, v1, a0, a1, a2, a3, - t0, t1, t2, t3, t4, t5, t6, t7, - s0, s1, s2, s3, s4, s5, s6, s7, - t8, t9, k0, k1, gp, sp, fp, ra, -}; - bool cps_pm_support_state(enum cps_pm_state state) { return test_bit(state, state_support); @@ -203,13 
+197,13 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, return; /* Load base address */ - UASM_i_LA(pp, t0, (long)CKSEG0); + UASM_i_LA(pp, GPR_T0, (long)CKSEG0); /* Calculate end address */ if (cache_size < 0x8000) - uasm_i_addiu(pp, t1, t0, cache_size); + uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size); else - UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); + UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size)); /* Start of cache op loop */ uasm_build_label(pl, *pp, lbl); @@ -217,19 +211,19 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, /* Generate the cache ops */ for (i = 0; i < unroll_lines; i++) { if (cpu_has_mips_r6) { - uasm_i_cache(pp, op, 0, t0); - uasm_i_addiu(pp, t0, t0, cache->linesz); + uasm_i_cache(pp, op, 0, GPR_T0); + uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz); } else { - uasm_i_cache(pp, op, i * cache->linesz, t0); + uasm_i_cache(pp, op, i * cache->linesz, GPR_T0); } } if (!cpu_has_mips_r6) /* Update the base address */ - uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); + uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz); /* Loop if we haven't reached the end address yet */ - uasm_il_bne(pp, pr, t0, t1, lbl); + uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl); uasm_i_nop(pp); } @@ -275,25 +269,25 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, */ /* Preserve perf counter setup */ - uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ - uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ /* Setup perf counter to count FSB full pipeline stalls */ - uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); - uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf); + uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ uasm_i_ehb(pp); - uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1); /* PerfCntN */ uasm_i_ehb(pp); /* Base address for loads */ - UASM_i_LA(pp, t0, (long)CKSEG0); + UASM_i_LA(pp, GPR_T0, (long)CKSEG0); /* Start of clear loop */ uasm_build_label(pl, *pp, lbl); /* Perform some loads to fill the FSB */ for (i = 0; i < num_loads; i++) - uasm_i_lw(pp, zero, i * line_size * line_stride, t0); + uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0); /* * Invalidate the new D-cache entries so that the cache will need @@ -301,9 +295,9 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, */ for (i = 0; i < num_loads; i++) { uasm_i_cache(pp, Hit_Invalidate_D, - i * line_size * line_stride, t0); + i * line_size * line_stride, GPR_T0); uasm_i_cache(pp, Hit_Writeback_Inv_SD, - i * line_size * line_stride, t0); + i * line_size * line_stride, GPR_T0); } /* Barrier ensuring previous cache invalidates are complete */ @@ -311,16 +305,16 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, uasm_i_ehb(pp); /* Check whether the pipeline stalled due to the FSB being full */ - uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mfc0(pp, GPR_T1, 25, (perf_counter * 2) + 1); /* PerfCntN */ /* Loop if it didn't */ - uasm_il_beqz(pp, pr, t1, lbl); + uasm_il_beqz(pp, pr, GPR_T1, lbl); uasm_i_nop(pp); /* Restore perf counter 1. The count may well now be wrong... 
*/ - uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ uasm_i_ehb(pp); - uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ uasm_i_ehb(pp); return 0; @@ -330,12 +324,12 @@ static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, struct uasm_reloc **pr, unsigned r_addr, int lbl) { - uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); + uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000)); uasm_build_label(pl, *pp, lbl); - uasm_i_ll(pp, t1, 0, r_addr); - uasm_i_or(pp, t1, t1, t0); - uasm_i_sc(pp, t1, 0, r_addr); - uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_ll(pp, GPR_T1, 0, r_addr); + uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0); + uasm_i_sc(pp, GPR_T1, 0, r_addr); + uasm_il_beqz(pp, pr, GPR_T1, lbl); uasm_i_nop(pp); } @@ -344,9 +338,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) struct uasm_label *l = labels; struct uasm_reloc *r = relocs; u32 *buf, *p; - const unsigned r_online = a0; - const unsigned r_nc_count = a1; - const unsigned r_pcohctl = t7; + const unsigned r_online = GPR_A0; + const unsigned r_nc_count = GPR_A1; + const unsigned r_pcohctl = GPR_T8; const unsigned max_instrs = 256; unsigned cpc_cmd; int err; @@ -383,8 +377,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * with the return address placed in v0 to avoid clobbering * the ra register before it is saved. */ - UASM_i_LA(&p, t0, (long)mips_cps_pm_save); - uasm_i_jalr(&p, v0, t0); + UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save); + uasm_i_jalr(&p, GPR_V0, GPR_T0); uasm_i_nop(&p); } @@ -399,11 +393,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) /* Increment ready_count */ uasm_i_sync(&p, __SYNC_mb); uasm_build_label(&l, p, lbl_incready); - uasm_i_ll(&p, t1, 0, r_nc_count); - uasm_i_addiu(&p, t2, t1, 1); - uasm_i_sc(&p, t2, 0, r_nc_count); - uasm_il_beqz(&p, &r, t2, lbl_incready); - uasm_i_addiu(&p, t1, t1, 1); + uasm_i_ll(&p, GPR_T1, 0, r_nc_count); + uasm_i_addiu(&p, GPR_T2, GPR_T1, 1); + uasm_i_sc(&p, GPR_T2, 0, r_nc_count); + uasm_il_beqz(&p, &r, GPR_T2, lbl_incready); + uasm_i_addiu(&p, GPR_T1, GPR_T1, 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ uasm_i_sync(&p, __SYNC_mb); @@ -412,7 +406,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * If this is the last VPE to become ready for non-coherence * then it should branch below. */ - uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); + uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence); uasm_i_nop(&p); if (state < CPS_PM_POWER_GATED) { @@ -422,13 +416,13 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * has been disabled before proceeding, which it will do * by polling for the top bit of ready_count being set. 
*/ - uasm_i_addiu(&p, t1, zero, -1); + uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1); uasm_build_label(&l, p, lbl_poll_cont); - uasm_i_lw(&p, t0, 0, r_nc_count); - uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); + uasm_i_lw(&p, GPR_T0, 0, r_nc_count); + uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont); uasm_i_ehb(&p); if (cpu_has_mipsmt) - uasm_i_yield(&p, zero, t1); + uasm_i_yield(&p, GPR_ZERO, GPR_T1); uasm_il_b(&p, &r, lbl_poll_cont); uasm_i_nop(&p); } else { @@ -438,16 +432,16 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) */ if (cpu_has_mipsmt) { /* Halt the VPE via C0 tchalt register */ - uasm_i_addiu(&p, t0, zero, TCHALT_H); - uasm_i_mtc0(&p, t0, 2, 4); + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H); + uasm_i_mtc0(&p, GPR_T0, 2, 4); } else if (cpu_has_vp) { /* Halt the VP via the CPC VP_STOP register */ unsigned int vpe_id; vpe_id = cpu_vpe_id(&cpu_data[cpu]); - uasm_i_addiu(&p, t0, zero, 1 << vpe_id); - UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop()); - uasm_i_sw(&p, t0, 0, t1); + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id); + UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop()); + uasm_i_sw(&p, GPR_T0, 0, GPR_T1); } else { BUG(); } @@ -482,9 +476,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * defined by the interAptiv & proAptiv SUMs as ensuring that the * operation resulting from the preceding store is complete. */ - uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu])); - uasm_i_sw(&p, t0, 0, r_pcohctl); - uasm_i_lw(&p, t0, 0, r_pcohctl); + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu])); + uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ uasm_i_sync(&p, __SYNC_full); @@ -492,8 +486,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) } /* Disable coherence */ - uasm_i_sw(&p, zero, 0, r_pcohctl); - uasm_i_lw(&p, t0, 0, r_pcohctl); + uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl); + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); if (state >= CPS_PM_CLOCK_GATED) { err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], @@ -515,9 +509,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) } /* Issue the CPC command */ - UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); - uasm_i_addiu(&p, t1, zero, cpc_cmd); - uasm_i_sw(&p, t1, 0, t0); + UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd()); + uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd); + uasm_i_sw(&p, GPR_T1, 0, GPR_T0); if (state == CPS_PM_POWER_GATED) { /* If anything goes wrong just hang */ @@ -564,12 +558,12 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * will run this. The first will actually re-enable coherence & the * rest will just be performing a rather unusual nop. */ - uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3 + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3 ? 
CM_GCR_Cx_COHERENCE_COHDOMAINEN : CM3_GCR_Cx_COHERENCE_COHEN); - uasm_i_sw(&p, t0, 0, r_pcohctl); - uasm_i_lw(&p, t0, 0, r_pcohctl); + uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ uasm_i_sync(&p, __SYNC_full); @@ -579,11 +573,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) /* Decrement ready_count */ uasm_build_label(&l, p, lbl_decready); uasm_i_sync(&p, __SYNC_mb); - uasm_i_ll(&p, t1, 0, r_nc_count); - uasm_i_addiu(&p, t2, t1, -1); - uasm_i_sc(&p, t2, 0, r_nc_count); - uasm_il_beqz(&p, &r, t2, lbl_decready); - uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); + uasm_i_ll(&p, GPR_T1, 0, r_nc_count); + uasm_i_addiu(&p, GPR_T2, GPR_T1, -1); + uasm_i_sc(&p, GPR_T2, 0, r_nc_count); + uasm_il_beqz(&p, &r, GPR_T2, lbl_decready); + uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ uasm_i_sync(&p, __SYNC_mb); @@ -612,7 +606,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) } /* The core is coherent, time to return to C code */ - uasm_i_jr(&p, ra); + uasm_i_jr(&p, GPR_RA); uasm_i_nop(&p); gen_done: diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 8eba5a1ed664..8f0a0001540c 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -66,24 +66,23 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", cpu_data[n].udelay_val / (500000/HZ), (cpu_data[n].udelay_val / (5000/HZ)) % 100); - seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); + seq_printf(m, "wait instruction\t: %s\n", str_yes_no(cpu_wait)); seq_printf(m, "microsecond timers\t: %s\n", - cpu_has_counter ? "yes" : "no"); + str_yes_no(cpu_has_counter)); seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); seq_printf(m, "extra interrupt vector\t: %s\n", - cpu_has_divec ? "yes" : "no"); - seq_printf(m, "hardware watchpoint\t: %s", - cpu_has_watch ? "yes, " : "no\n"); + str_yes_no(cpu_has_divec)); + seq_printf(m, "hardware watchpoint\t: %s", str_yes_no(cpu_has_watch)); if (cpu_has_watch) { - seq_printf(m, "count: %d, address/irw mask: [", + seq_printf(m, ", count: %d, address/irw mask: [", cpu_data[n].watch_reg_count); for (i = 0; i < cpu_data[n].watch_reg_count; i++) seq_printf(m, "%s0x%04x", i ? ", " : "", cpu_data[n].watch_reg_masks[i]); - seq_puts(m, "]\n"); + seq_puts(m, "]"); } - seq_puts(m, "isa\t\t\t:"); + seq_puts(m, "\nisa\t\t\t:"); if (cpu_has_mips_1) seq_puts(m, " mips1"); if (cpu_has_mips_2) @@ -155,7 +154,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mmips) { seq_printf(m, "micromips kernel\t: %s\n", - (read_c0_config3() & MIPS_CONF3_ISA_OE) ? 
"yes" : "no"); + str_yes_no(read_c0_config3() & MIPS_CONF3_ISA_OE)); } seq_puts(m, "Options implemented\t:"); diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index f88ce78e13e3..4fd6da0a06c3 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -28,6 +28,8 @@ __init void mips_set_machine_name(const char *name) strscpy(mips_machine_name, name, sizeof(mips_machine_name)); pr_info("MIPS: machine is %s\n", mips_get_machine_name()); + + dump_stack_set_arch_desc(name); } char *mips_get_machine_name(void) @@ -39,7 +41,7 @@ char *mips_get_machine_name(void) void __init __dt_setup_arch(void *bph) { - if (!early_init_dt_scan(bph)) + if (!early_init_dt_scan(bph, __pa(bph))) return; mips_set_machine_name(of_flat_dt_get_machine_name()); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index d9df543f7e2c..61503a36067e 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -31,6 +31,7 @@ #include <linux/seccomp.h> #include <linux/ftrace.h> +#include <asm/branch.h> #include <asm/byteorder.h> #include <asm/cpu.h> #include <asm/cpu-info.h> @@ -48,6 +49,12 @@ #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> +unsigned long exception_ip(struct pt_regs *regs) +{ + return exception_epc(regs); +} +EXPORT_SYMBOL(exception_ip); + /* * Called by kernel/ptrace.c when detaching.. * @@ -1310,16 +1317,13 @@ long arch_ptrace(struct task_struct *child, long request, * Notification of system call entry/exit * - triggered by current->work.syscall_trace */ -asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) +asmlinkage long syscall_trace_enter(struct pt_regs *regs) { user_exit(); - current_thread_info()->syscall = syscall; - if (test_thread_flag(TIF_SYSCALL_TRACE)) { if (ptrace_report_syscall_entry(regs)) return -1; - syscall = current_thread_info()->syscall; } #ifdef CONFIG_SECCOMP @@ -1328,7 +1332,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) struct seccomp_data sd; unsigned long args[6]; - sd.nr = syscall; + sd.nr = current_thread_info()->syscall; sd.arch = syscall_get_arch(current); syscall_get_arguments(current, regs, args); for (i = 0; i < 6; i++) @@ -1338,23 +1342,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) ret = __secure_computing(&sd); if (ret == -1) return ret; - syscall = current_thread_info()->syscall; } #endif if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[2]); - audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], + audit_syscall_entry(current_thread_info()->syscall, + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); /* * Negative syscall numbers are mistaken for rejected syscalls, but * won't have had the return value set appropriately, so we do so now. 
*/ - if (syscall < 0) + if (current_thread_info()->syscall < 0) syscall_set_return_value(current, regs, -ENOSYS, 0); - return syscall; + return current_thread_info()->syscall; } /* diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c index 6ffefb2c6971..1e300330078d 100644 --- a/arch/mips/kernel/r4k-bugs64.c +++ b/arch/mips/kernel/r4k-bugs64.c @@ -14,6 +14,7 @@ #include <asm/fpu.h> #include <asm/mipsregs.h> #include <asm/setup.h> +#include <asm/traps.h> static char bug64hit[] __initdata = "reliable operation impossible!\n%s"; diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c index 58fc8d089402..cda7983e7c18 100644 --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -337,7 +337,7 @@ void *__init relocate_kernel(void) #if defined(CONFIG_USE_OF) /* Deal with the device tree */ fdt = plat_get_fdt(); - early_init_dt_scan(fdt); + early_init_dt_scan(fdt, __pa(fdt)); if (boot_command_line[0]) { /* Boot command line was passed in device tree */ strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); @@ -380,7 +380,7 @@ void *__init relocate_kernel(void) } #endif /* CONFIG_USE_OF */ - /* Copy the kernel to it's new location */ + /* Copy the kernel to its new location */ memcpy(loc_new, &_text, kernel_length); /* Perform relocations on the new kernel */ diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 8f0a7263a9d6..de894a0211d7 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -70,7 +70,7 @@ copy_word: done: #ifdef CONFIG_SMP /* kexec_flag reset is signal to other CPUs what kernel - was moved to it's location. Note - we need relocated address + was moved to its location. Note - we need relocated address of kexec_flag. 
*/ bal 1f diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c index 38c6925a1bea..ff7535de42ca 100644 --- a/arch/mips/kernel/rtlx-mt.c +++ b/arch/mips/kernel/rtlx-mt.c @@ -95,11 +95,11 @@ int __init rtlx_module_init(void) atomic_set(&channel_wqs[i].in_open, 0); mutex_init(&channel_wqs[i].mutex); - dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, + dev = device_create(&mt_class, NULL, MKDEV(major, i), NULL, "%s%d", RTLX_MODULE_NAME, i); if (IS_ERR(dev)) { while (i--) - device_destroy(mt_class, MKDEV(major, i)); + device_destroy(&mt_class, MKDEV(major, i)); err = PTR_ERR(dev); goto out_chrdev; @@ -127,7 +127,7 @@ int __init rtlx_module_init(void) out_class: for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); + device_destroy(&mt_class, MKDEV(major, i)); out_chrdev: unregister_chrdev(major, RTLX_MODULE_NAME); @@ -139,7 +139,7 @@ void __exit rtlx_module_exit(void) int i; for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); + device_destroy(&mt_class, MKDEV(major, i)); unregister_chrdev(major, RTLX_MODULE_NAME); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 18dc9b345056..4947a4f39e37 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -64,10 +64,10 @@ load_a6: user_lw(t7, 24(t0)) # argument #7 from usp load_a7: user_lw(t8, 28(t0)) # argument #8 from usp loads_done: - sw t5, 16(sp) # argument #5 to ksp - sw t6, 20(sp) # argument #6 to ksp - sw t7, 24(sp) # argument #7 to ksp - sw t8, 28(sp) # argument #8 to ksp + sw t5, PT_ARG4(sp) # argument #5 to ksp + sw t6, PT_ARG5(sp) # argument #6 to ksp + sw t7, PT_ARG6(sp) # argument #7 to ksp + sw t8, PT_ARG7(sp) # argument #8 to ksp .set pop .section __ex_table,"a" @@ -77,6 +77,18 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + */ + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + lw t0, TI_FLAGS($28) # syscall tracing enabled? li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 @@ -114,16 +126,7 @@ syscall_trace_entry: SAVE_STATIC move a0, sp - /* - * syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - */ - move a1, v0 - subu t2, v0, __NR_O32_Linux - bnez t2, 1f /* __NR_syscall at offset 0 */ - lw a1, PT_R4(sp) - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 97456b2ca7dc..97788859238c 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -72,7 +74,6 @@ syscall_common: n32_syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? 
Skip syscall diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index e6264aa62e45..be11ea5cc67e 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -82,7 +84,6 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index d3c2616cba22..7a5abb73e531 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -79,6 +79,22 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * absolute syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + * note: NR_syscall is the first O32 syscall but the macro is + * only defined when compiling with -mabi=32 (CONFIG_32BIT) + * therefore __NR_O32_Linux is used (4000) + */ + + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -113,22 +129,7 @@ trace_a_syscall: sd a7, PT_R11(sp) # For indirect syscalls move a0, sp - /* - * absolute syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - * note: NR_syscall is the first O32 syscall but the macro is - * only defined when compiling with -mabi=32 (CONFIG_32BIT) - * therefore __NR_O32_Linux is used (4000) - */ - .set push - .set reorder - subu t1, v0, __NR_O32_Linux - move a1, v0 - bnez t1, 1f /* __NR_syscall at offset 0 */ - ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ - .set pop - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 2d2ca024bd47..fbfe0771317e 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -42,6 +42,7 @@ #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp-ops.h> +#include <asm/mips-cps.h> #include <asm/prom.h> #include <asm/fw/fw.h> @@ -146,7 +147,7 @@ static unsigned long __init init_initrd(void) /* * Board specific code or command line parser should have * already set up initrd_start and initrd_end. In these cases - * perfom sanity checks and use them if all looks good. + * perform sanity checks and use them if all looks good. 
*/ if (!initrd_start || initrd_end <= initrd_start) goto disable; @@ -321,11 +322,11 @@ static void __init bootmem_init(void) panic("Incorrect memory mapping !!!"); if (max_pfn > PFN_DOWN(HIGHMEM_START)) { + max_low_pfn = PFN_DOWN(HIGHMEM_START); #ifdef CONFIG_HIGHMEM - highstart_pfn = PFN_DOWN(HIGHMEM_START); + highstart_pfn = max_low_pfn; highend_pfn = max_pfn; #else - max_low_pfn = PFN_DOWN(HIGHMEM_START); max_pfn = max_low_pfn; #endif } @@ -441,8 +442,6 @@ static void __init mips_reserve_vmcore(void) #endif } -#ifdef CONFIG_KEXEC - /* 64M alignment for crash kernel regions */ #define CRASH_ALIGN SZ_64M #define CRASH_ADDR_MAX SZ_512M @@ -453,6 +452,9 @@ static void __init mips_parse_crashkernel(void) unsigned long long crash_size, crash_base; int ret; + if (!IS_ENABLED(CONFIG_CRASH_RESERVE)) + return; + total_mem = memblock_phys_mem_size(); ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base, @@ -488,6 +490,9 @@ static void __init request_crashkernel(struct resource *res) { int ret; + if (!IS_ENABLED(CONFIG_CRASH_RESERVE)) + return; + if (crashk_res.start == crashk_res.end) return; @@ -497,15 +502,6 @@ static void __init request_crashkernel(struct resource *res) (unsigned long)(resource_size(&crashk_res) >> 20), (unsigned long)(crashk_res.start >> 20)); } -#else /* !defined(CONFIG_KEXEC) */ -static void __init mips_parse_crashkernel(void) -{ -} - -static void __init request_crashkernel(struct resource *res) -{ -} -#endif /* !defined(CONFIG_KEXEC) */ static void __init check_kernel_sections_mem(void) { @@ -708,10 +704,7 @@ static void __init resource_init(void) for_each_mem_range(i, &start, &end) { struct resource *res; - res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct resource)); + res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES); res->start = start; /* diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index f50d48435c68..136eb20ac024 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -40,4 +40,7 @@ _restore_fp_context(void __user *fpregs, void __user *csr); extern asmlinkage int _save_msa_all_upper(void __user *buf); extern asmlinkage int _restore_msa_all_upper(void __user *buf); +extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); +extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); + #endif /* __SIGNAL_COMMON_H */ diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 479999b7f2de..4a10f18a8806 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -38,6 +38,7 @@ #include <asm/dsp.h> #include <asm/inst.h> #include <asm/msa.h> +#include <asm/syscalls.h> #include "signal-common.h" @@ -569,7 +570,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, return (void __user __force *)(-1UL); /* - * FPU emulator may have it's own trampoline active just + * FPU emulator may have its own trampoline active just * above the user stack, 16-bytes before the next lowest * 16 byte boundary. Try to avoid trashing it. 
*/ diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 59b8965433c2..73081d4ee8c1 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -18,6 +18,7 @@ #include <asm/compat-signal.h> #include <linux/uaccess.h> #include <asm/unistd.h> +#include <asm/syscalls.h> #include "signal-common.h" diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index cfc77b69420a..139d2596b0d4 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -24,6 +24,7 @@ #include <asm/ucontext.h> #include <asm/fpu.h> #include <asm/cpu-features.h> +#include <asm/syscalls.h> #include "signal-common.h" @@ -32,9 +33,6 @@ */ #define __NR_N32_restart_syscall 6214 -extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); -extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); - struct ucontextn32 { u32 uc_flags; s32 uc_link; diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c index 299a7a28ca33..4f0458459650 100644 --- a/arch/mips/kernel/signal_o32.c +++ b/arch/mips/kernel/signal_o32.c @@ -19,6 +19,7 @@ #include <asm/dsp.h> #include <asm/sim.h> #include <asm/unistd.h> +#include <asm/syscalls.h> #include "signal-common.h" diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index b3dbf9ecb0d6..35b8d810833c 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -518,7 +518,7 @@ static void bmips_set_reset_vec(int cpu, u32 val) info.val = val; bmips_set_reset_vec_remote(&info); } else { - void __iomem *cbr = BMIPS_GET_CBR(); + void __iomem *cbr = bmips_cbr_addr; if (cpu == 0) __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0); @@ -591,7 +591,8 @@ asmlinkage void __weak plat_wired_tlb_setup(void) void bmips_cpu_setup(void) { - void __iomem __maybe_unused *cbr = BMIPS_GET_CBR(); + void __iomem __maybe_unused *cbr = bmips_cbr_addr; + u32 __maybe_unused rac_addr; u32 __maybe_unused cfg; switch (current_cpu_type()) { @@ -620,6 +621,23 @@ void bmips_cpu_setup(void) __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE); break; + case CPU_BMIPS4350: + rac_addr = BMIPS_RAC_CONFIG_1; + + if (!(read_c0_brcm_cmt_local() & (1 << 31))) + rac_addr = BMIPS_RAC_CONFIG; + + /* Enable data RAC */ + cfg = __raw_readl(cbr + rac_addr); + __raw_writel(cfg | 0xf, cbr + rac_addr); + __raw_readl(cbr + rac_addr); + + /* Flush stale data out of the readahead cache */ + cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); + __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG); + __raw_readl(cbr + BMIPS_RAC_CONFIG); + break; + case CPU_BMIPS4380: /* CBG workaround for early BMIPS4380 CPUs */ switch (read_c0_prid()) { diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index f6c37d407f36..82c8f9b9573c 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -7,6 +7,7 @@ #include <linux/cpu.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/memblock.h> #include <linux/sched/task_stack.h> #include <linux/sched/hotplug.h> #include <linux/slab.h> @@ -20,12 +21,24 @@ #include <asm/mipsregs.h> #include <asm/pm-cps.h> #include <asm/r4kcache.h> +#include <asm/regdef.h> #include <asm/smp.h> #include <asm/smp-cps.h> #include <asm/time.h> #include <asm/uasm.h> +#define BEV_VEC_SIZE 0x500 +#define BEV_VEC_ALIGN 0x1000 + +enum label_id { + label_not_nmi = 1, +}; + +UASM_L_LA(_not_nmi) + static DECLARE_BITMAP(core_power, NR_CPUS); +static u64 core_entry_reg; +static phys_addr_t cps_vec_pa; struct core_boot_config *mips_cps_core_bootcfg; @@ 
-34,10 +47,126 @@ static unsigned __init core_vpe_count(unsigned int cluster, unsigned core) return min(smp_max_threads, mips_cps_numvps(cluster, core)); } +static void __init *mips_cps_build_core_entry(void *addr) +{ + extern void (*nmi_handler)(void); + u32 *p = addr; + u32 val; + struct uasm_label labels[2]; + struct uasm_reloc relocs[2]; + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + uasm_i_mfc0(&p, GPR_K0, C0_STATUS); + UASM_i_LA(&p, GPR_T9, ST0_NMI); + uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9); + + uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi); + uasm_i_nop(&p); + UASM_i_LA(&p, GPR_K0, (long)&nmi_handler); + + uasm_l_not_nmi(&l, p); + + val = CAUSEF_IV; + uasm_i_lui(&p, GPR_K0, val >> 16); + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); + uasm_i_mtc0(&p, GPR_K0, C0_CAUSE); + val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64; + uasm_i_lui(&p, GPR_K0, val >> 16); + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); + uasm_i_mtc0(&p, GPR_K0, C0_STATUS); + uasm_i_ehb(&p); + uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK); + UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base); +#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT) + UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot))); +#else + UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot))); +#endif + uasm_i_jr(&p, GPR_T9); + uasm_i_nop(&p); + + uasm_resolve_relocs(relocs, labels); + + return p; +} + +static bool __init check_64bit_reset(void) +{ + bool cx_64bit_reset = false; + + mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE); + if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) == + CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) + cx_64bit_reset = true; + mips_cm_unlock_other(); + + return cx_64bit_reset; +} + +static int __init allocate_cps_vecs(void) +{ + /* Try to allocate in KSEG1 first */ + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, + 0x0, CSEGX_SIZE - 1); + + if (cps_vec_pa) + core_entry_reg = CKSEG1ADDR(cps_vec_pa) & + CM_GCR_Cx_RESET_BASE_BEVEXCBASE; + + if (!cps_vec_pa && mips_cm_is64) { + phys_addr_t end; + + if (check_64bit_reset()) { + pr_info("VP Local Reset Exception Base support 47 bits address\n"); + end = MEMBLOCK_ALLOC_ANYWHERE; + } else { + end = SZ_4G - 1; + } + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end); + if (cps_vec_pa) { + if (check_64bit_reset()) + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) | + CM_GCR_Cx_RESET_BASE_MODE; + else + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) | + CM_GCR_Cx_RESET_BASE_MODE; + } + } + + if (!cps_vec_pa) + return -ENOMEM; + + return 0; +} + +static void __init setup_cps_vecs(void) +{ + void *cps_vec; + + cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa); + mips_cps_build_core_entry(cps_vec); + + memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80); + memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80); + memcpy(cps_vec + 0x300, &excep_cache, 0x80); + memcpy(cps_vec + 0x380, &excep_genex, 0x80); + memcpy(cps_vec + 0x400, &excep_intex, 0x80); + memcpy(cps_vec + 0x480, &excep_ejtag, 0x80); + + /* Make sure no prefetched data in cache */ + blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE); + bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE); + __sync(); +} + static void __init cps_smp_setup(void) { unsigned int nclusters, ncores, nvpes, 
core_vpes; - unsigned long core_entry; int cl, c, v; /* Detect & record VPE topology */ @@ -94,10 +223,11 @@ static void __init cps_smp_setup(void) /* Make core 0 coherent with everything */ write_gcr_cl_coherence(0xff); - if (mips_cm_revision() >= CM_REV_CM3) { - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); - write_gcr_bev_base(core_entry); - } + if (allocate_cps_vecs()) + pr_err("Failed to allocate CPS vectors\n"); + + if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3) + write_gcr_bev_base(core_entry_reg); #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ @@ -110,10 +240,14 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) { unsigned ncores, core_vpes, c, cca; bool cca_unsuitable, cores_limited; - u32 *entry_code; mips_mt_set_cpuoptions(); + if (!core_entry_reg) { + pr_err("core_entry address unsuitable, disabling smp-cps\n"); + goto err_out; + } + /* Detect whether the CCA is unsuited to multi-core SMP */ cca = read_c0_config() & CONF_CM_CMASK; switch (cca) { @@ -145,20 +279,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", cpu_has_dc_aliases ? "dcache aliasing" : ""); - /* - * Patch the start of mips_cps_core_entry to provide: - * - * s0 = kseg0 CCA - */ - entry_code = (u32 *)&mips_cps_core_entry; - uasm_i_addiu(&entry_code, 16, 0, cca); - UASM_i_LA(&entry_code, 17, (long)mips_gcr_base); - BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end); - blast_dcache_range((unsigned long)&mips_cps_core_entry, - (unsigned long)entry_code); - bc_wback_inv((unsigned long)&mips_cps_core_entry, - (void *)entry_code - (void *)&mips_cps_core_entry); - __sync(); + setup_cps_vecs(); /* Allocate core boot configuration structs */ ncores = mips_cps_numcores(0); @@ -213,7 +334,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id) mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* Set its reset vector */ - write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); + if (mips_cm_is64) + write_gcr_co_reset64_base(core_entry_reg); + else + write_gcr_co_reset_base(core_entry_reg); /* Ensure its coherency is disabled */ write_gcr_co_coherence(0); @@ -222,7 +346,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id) write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB); /* Ensure the core can access the GCRs */ - set_gcr_access(1 << core); + if (mips_cm_revision() < CM_REV_CM3) + set_gcr_access(1 << core); + else + set_gcr_access_cm3(1 << core); if (mips_cpc_present()) { /* Reset the core */ @@ -290,7 +417,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle) unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; - unsigned long core_entry; unsigned int remote; int err; @@ -314,8 +440,10 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle) if (cpu_has_vp) { mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); - write_gcr_co_reset_base(core_entry); + if (mips_cm_is64) + write_gcr_co_reset64_base(core_entry_reg); + else + write_gcr_co_reset_base(core_entry_reg); mips_cm_unlock_other(); } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 82e2e051b416..39e193cad2b9 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -10,6 +10,7 @@ #include 
<linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/profile.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/threads.h> @@ -438,7 +439,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } /* preload SMP state for boot cpu */ -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { if (mp_ops->prepare_boot_cpu) mp_ops->prepare_boot_cpu(); @@ -461,18 +462,18 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) return -EIO; } - synchronise_count_master(cpu); - /* Wait for CPU to finish startup & mark itself online before return */ wait_for_completion(&cpu_running); return 0; } +#ifdef CONFIG_PROFILING /* Not really SMP stuff ... */ int setup_profiling_timer(unsigned int multiplier) { return 0; } +#endif static void flush_tlb_all_ipi(void *info) { diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index d5d96214cce5..dd31e3fffd24 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -12,6 +12,7 @@ #include <asm/mipsregs.h> #include <asm/r4kcache.h> #include <asm/hazards.h> +#include <asm/spram.h> /* * These definitions are correct for the 24K/34K/74K SPRAM sample @@ -25,10 +26,6 @@ #define ERRCTL_SPRAM (1 << 28) -/* errctl access */ -#define read_c0_errctl(x) read_c0_ecc(x) -#define write_c0_errctl(x) write_c0_ecc(x) - /* * Different semantics to the set_c0_* function built by __BUILD_SET_C0 */ diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index abdd7aaa3311..39156592582e 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -2,121 +2,244 @@ /* * Count register synchronisation. * - * All CPUs will have their count registers synchronised to the CPU0 next time - * value. This can cause a small timewarp for CPU0. All other CPU's should - * not have done anything significant (but they may have had interrupts - * enabled briefly - prom_smp_finish() should not be responsible for enabling - * interrupts...) + * Derived from arch/x86/kernel/tsc_sync.c + * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar */ #include <linux/kernel.h> #include <linux/irqflags.h> #include <linux/cpumask.h> +#include <linux/atomic.h> +#include <linux/nmi.h> +#include <linux/smp.h> +#include <linux/spinlock.h> #include <asm/r4k-timer.h> -#include <linux/atomic.h> -#include <asm/barrier.h> #include <asm/mipsregs.h> +#include <asm/time.h> -static unsigned int initcount = 0; -static atomic_t count_count_start = ATOMIC_INIT(0); -static atomic_t count_count_stop = ATOMIC_INIT(0); - -#define COUNTON 100 -#define NR_LOOPS 3 - -void synchronise_count_master(int cpu) -{ - int i; - unsigned long flags; - - pr_info("Synchronize counters for CPU %u: ", cpu); +#define COUNTON 100 +#define NR_LOOPS 3 +#define LOOP_TIMEOUT 20 - local_irq_save(flags); +/* + * Entry/exit counters that make sure that both CPUs + * run the measurement code at once: + */ +static atomic_t start_count; +static atomic_t stop_count; +static atomic_t test_runs; - /* - * We loop a few times to get a primed instruction cache, - * then the last pass is more or less synchronised and - * the master and slaves each set their cycle counters to a known - * value all at once. This reduces the chance of having random offsets - * between the processors, and guarantees that the maximum - * delay between the cycle counters is never bigger than - * the latency of information-passing (cachelines) between - * two CPUs. 
- */ +/* + * We use a raw spinlock in this exceptional case, because + * we want to have the fastest, inlined, non-debug version + * of a critical section, to be able to prove counter time-warps: + */ +static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; - for (i = 0; i < NR_LOOPS; i++) { - /* slaves loop on '!= 2' */ - while (atomic_read(&count_count_start) != 1) - mb(); - atomic_set(&count_count_stop, 0); - smp_wmb(); +static uint32_t last_counter; +static uint32_t max_warp; +static int nr_warps; +static int random_warps; - /* Let the slave writes its count register */ - atomic_inc(&count_count_start); +/* + * Counter warp measurement loop running on both CPUs. + */ +static uint32_t check_counter_warp(void) +{ + uint32_t start, now, prev, end, cur_max_warp = 0; + int i, cur_warps = 0; - /* Count will be initialised to current timer */ - if (i == 1) - initcount = read_c0_count(); + start = read_c0_count(); + end = start + (uint32_t) mips_hpt_frequency / 1000 * LOOP_TIMEOUT; + for (i = 0; ; i++) { /* - * Everyone initialises count in the last loop: + * We take the global lock, measure counter, save the + * previous counter that was measured (possibly on + * another CPU) and update the previous counter timestamp. */ - if (i == NR_LOOPS-1) - write_c0_count(initcount); + arch_spin_lock(&sync_lock); + prev = last_counter; + now = read_c0_count(); + last_counter = now; + arch_spin_unlock(&sync_lock); /* - * Wait for slave to leave the synchronization point: + * Be nice every now and then (and also check whether + * measurement is done [we also insert a 10 million + * loops safety exit, so we dont lock up in case the + * counter is totally broken]): */ - while (atomic_read(&count_count_stop) != 1) - mb(); - atomic_set(&count_count_start, 0); - smp_wmb(); - atomic_inc(&count_count_stop); + if (unlikely(!(i & 7))) { + if (now > end || i > 10000000) + break; + cpu_relax(); + touch_nmi_watchdog(); + } + /* + * Outside the critical section we can now see whether + * we saw a time-warp of the counter going backwards: + */ + if (unlikely(prev > now)) { + arch_spin_lock(&sync_lock); + max_warp = max(max_warp, prev - now); + cur_max_warp = max_warp; + /* + * Check whether this bounces back and forth. Only + * one CPU should observe time going backwards. + */ + if (cur_warps != nr_warps) + random_warps++; + nr_warps++; + cur_warps = nr_warps; + arch_spin_unlock(&sync_lock); + } + } + WARN(!(now-start), + "Warning: zero counter calibration delta: %d [max: %d]\n", + now-start, end-start); + return cur_max_warp; +} + +/* + * The freshly booted CPU initiates this via an async SMP function call. + */ +static void check_counter_sync_source(void *__cpu) +{ + unsigned int cpu = (unsigned long)__cpu; + int cpus = 2; + + atomic_set(&test_runs, NR_LOOPS); +retry: + /* Wait for the target to start. */ + while (atomic_read(&start_count) != cpus - 1) + cpu_relax(); + + /* + * Trigger the target to continue into the measurement too: + */ + atomic_inc(&start_count); + + check_counter_warp(); + + while (atomic_read(&stop_count) != cpus-1) + cpu_relax(); + + /* + * If the test was successful set the number of runs to zero and + * stop. If not, decrement the number of runs an check if we can + * retry. In case of random warps no retry is attempted. 
+ */ + if (!nr_warps) { + atomic_set(&test_runs, 0); + + pr_info("Counter synchronization [CPU#%d -> CPU#%u]: passed\n", + smp_processor_id(), cpu); + } else if (atomic_dec_and_test(&test_runs) || random_warps) { + /* Force it to 0 if random warps brought us here */ + atomic_set(&test_runs, 0); + + pr_info("Counter synchronization [CPU#%d -> CPU#%u]:\n", + smp_processor_id(), cpu); + pr_info("Measured %d cycles counter warp between CPUs", max_warp); + if (random_warps) + pr_warn("Counter warped randomly between CPUs\n"); } - /* Arrange for an interrupt in a short while */ - write_c0_compare(read_c0_count() + COUNTON); - local_irq_restore(flags); + /* + * Reset it - just in case we boot another CPU later: + */ + atomic_set(&start_count, 0); + random_warps = 0; + nr_warps = 0; + max_warp = 0; + last_counter = 0; + + /* + * Let the target continue with the bootup: + */ + atomic_inc(&stop_count); /* - * i386 code reported the skew here, but the - * count registers were almost certainly out of sync - * so no point in alarming people + * Retry, if there is a chance to do so. */ - pr_cont("done.\n"); + if (atomic_read(&test_runs) > 0) + goto retry; } +/* + * Freshly booted CPUs call into this: + */ void synchronise_count_slave(int cpu) { - int i; - unsigned long flags; + uint32_t cur_max_warp, gbl_max_warp, count; + int cpus = 2; - local_irq_save(flags); + if (!cpu_has_counter || !mips_hpt_frequency) + return; + /* Kick the control CPU into the counter synchronization function */ + smp_call_function_single(cpumask_first(cpu_online_mask), + check_counter_sync_source, + (unsigned long *)(unsigned long)cpu, 0); +retry: /* - * Not every cpu is online at the time this gets called, - * so we first wait for the master to say everyone is ready + * Register this CPU's participation and wait for the + * source CPU to start the measurement: */ + atomic_inc(&start_count); + while (atomic_read(&start_count) != cpus) + cpu_relax(); - for (i = 0; i < NR_LOOPS; i++) { - atomic_inc(&count_count_start); - while (atomic_read(&count_count_start) != 2) - mb(); + cur_max_warp = check_counter_warp(); - /* - * Everyone initialises count in the last loop: - */ - if (i == NR_LOOPS-1) - write_c0_count(initcount); + /* + * Store the maximum observed warp value for a potential retry: + */ + gbl_max_warp = max_warp; + + /* + * Ok, we are done: + */ + atomic_inc(&stop_count); + + /* + * Wait for the source CPU to print stuff: + */ + while (atomic_read(&stop_count) != cpus) + cpu_relax(); - atomic_inc(&count_count_stop); - while (atomic_read(&count_count_stop) != 2) - mb(); + /* + * Reset it for the next sync test: + */ + atomic_set(&stop_count, 0); + + /* + * Check the number of remaining test runs. If not zero, the test + * failed and a retry with adjusted counter is possible. If zero the + * test was either successful or failed terminally. + */ + if (!atomic_read(&test_runs)) { + /* Arrange for an interrupt in a short while */ + write_c0_compare(read_c0_count() + COUNTON); + return; } - /* Arrange for an interrupt in a short while */ - write_c0_compare(read_c0_count() + COUNTON); - local_irq_restore(flags); + /* + * If the warp value of this CPU is 0, then the other CPU + * observed time going backwards so this counter was ahead and + * needs to move backwards. 
+ */ + if (!cur_max_warp) + cur_max_warp = -gbl_max_warp; + + count = read_c0_count(); + count += cur_max_warp; + write_c0_count(count); + + pr_debug("Counter compensate: CPU%u observed %d warp\n", cpu, cur_max_warp); + + goto retry; + } -#undef NR_LOOPS diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index ae93a607ddf7..1bfc34a2e5b3 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -39,6 +39,7 @@ #include <asm/shmparam.h> #include <asm/sync.h> #include <asm/sysmips.h> +#include <asm/syscalls.h> #include <asm/switch_to.h> /* diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index e6b21de65cca..56f6f093bb88 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm $(shell mkdir -p $(uapi) $(kapi)) syshdr := $(srctree)/scripts/syscallhdr.sh -sysnr := $(srctree)/$(src)/syscallnr.sh +sysnr := $(src)/syscallnr.sh systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index a842b41c8e06..0b9b7e25b69a 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -354,7 +354,7 @@ 412 n32 utimensat_time64 sys_utimensat 413 n32 pselect6_time64 compat_sys_pselect6_time64 414 n32 ppoll_time64 compat_sys_ppoll_time64 -416 n32 io_pgetevents_time64 sys_io_pgetevents +416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64 417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64 418 n32 mq_timedsend_time64 sys_mq_timedsend 419 n32 mq_timedreceive_time64 sys_mq_timedreceive @@ -395,3 +395,13 @@ 454 n32 futex_wake sys_futex_wake 455 n32 futex_wait sys_futex_wait 456 n32 futex_requeue sys_futex_requeue +457 n32 statmount sys_statmount +458 n32 listmount sys_listmount +459 n32 lsm_get_self_attr sys_lsm_get_self_attr +460 n32 lsm_set_self_attr sys_lsm_set_self_attr +461 n32 lsm_list_modules sys_lsm_list_modules +462 n32 mseal sys_mseal +463 n32 setxattrat sys_setxattrat +464 n32 getxattrat sys_getxattrat +465 n32 listxattrat sys_listxattrat +466 n32 removexattrat sys_removexattrat diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 116ff501bf92..c844cd5cda62 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -371,3 +371,13 @@ 454 n64 futex_wake sys_futex_wake 455 n64 futex_wait sys_futex_wait 456 n64 futex_requeue sys_futex_requeue +457 n64 statmount sys_statmount +458 n64 listmount sys_listmount +459 n64 lsm_get_self_attr sys_lsm_get_self_attr +460 n64 lsm_set_self_attr sys_lsm_set_self_attr +461 n64 lsm_list_modules sys_lsm_list_modules +462 n64 mseal sys_mseal +463 n64 setxattrat sys_setxattrat +464 n64 getxattrat sys_getxattrat +465 n64 listxattrat sys_listxattrat +466 n64 removexattrat sys_removexattrat diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 525cc54bc63b..349b8aad1159 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -27,7 +27,7 @@ 17 o32 break sys_ni_syscall # 18 was sys_stat 18 o32 unused18 sys_ni_syscall -19 o32 lseek sys_lseek +19 o32 lseek sys_lseek compat_sys_lseek 20 o32 getpid sys_getpid 21 o32 mount sys_mount 22 o32 umount sys_oldumount @@ -403,7 +403,7 @@ 412 o32 utimensat_time64 sys_utimensat sys_utimensat 413 o32 pselect6_time64 
sys_pselect6 compat_sys_pselect6_time64 414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -444,3 +444,13 @@ 454 o32 futex_wake sys_futex_wake 455 o32 futex_wait sys_futex_wait 456 o32 futex_requeue sys_futex_requeue +457 o32 statmount sys_statmount +458 o32 listmount sys_listmount +459 o32 lsm_get_self_attr sys_lsm_get_self_attr +460 o32 lsm_set_self_attr sys_lsm_set_self_attr +461 o32 lsm_list_modules sys_lsm_list_modules +462 o32 mseal sys_mseal +463 o32 setxattrat sys_setxattrat +464 o32 getxattrat sys_getxattrat +465 o32 listxattrat sys_listxattrat +466 o32 removexattrat sys_removexattrat diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 246c6a6b0261..39e248d0ed59 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -38,6 +38,7 @@ #include <linux/kdb.h> #include <linux/irq.h> #include <linux/perf_event.h> +#include <linux/string_choices.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> @@ -58,6 +59,7 @@ #include <asm/module.h> #include <asm/msa.h> #include <asm/ptrace.h> +#include <asm/regdef.h> #include <asm/sections.h> #include <asm/siginfo.h> #include <asm/tlbdebug.h> @@ -1704,10 +1706,10 @@ static inline __init void parity_protection_init(void) l2parity &= l1parity; /* Probe L1 ECC support */ - cp0_ectl = read_c0_ecc(); - write_c0_ecc(cp0_ectl | ERRCTL_PE); + cp0_ectl = read_c0_errctl(); + write_c0_errctl(cp0_ectl | ERRCTL_PE); back_to_back_c0_hazard(); - cp0_ectl = read_c0_ecc(); + cp0_ectl = read_c0_errctl(); /* Probe L2 ECC support */ gcr_ectl = read_gcr_err_control(); @@ -1726,9 +1728,9 @@ static inline __init void parity_protection_init(void) cp0_ectl |= ERRCTL_PE; else cp0_ectl &= ~ERRCTL_PE; - write_c0_ecc(cp0_ectl); + write_c0_errctl(cp0_ectl); back_to_back_c0_hazard(); - WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity); + WARN_ON(!!(read_c0_errctl() & ERRCTL_PE) != l1parity); /* Configure L2 ECC checking */ if (l2parity) @@ -1740,8 +1742,8 @@ static inline __init void parity_protection_init(void) gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN; WARN_ON(!!gcr_ectl != l2parity); - pr_info("Cache parity protection %sabled\n", - l1parity ? 
"en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(l1parity)); return; } @@ -1760,18 +1762,18 @@ static inline __init void parity_protection_init(void) unsigned long errctl; unsigned int l1parity_present, l2parity_present; - errctl = read_c0_ecc(); + errctl = read_c0_errctl(); errctl &= ~(ERRCTL_PE|ERRCTL_L2P); /* probe L1 parity support */ - write_c0_ecc(errctl | ERRCTL_PE); + write_c0_errctl(errctl | ERRCTL_PE); back_to_back_c0_hazard(); - l1parity_present = (read_c0_ecc() & ERRCTL_PE); + l1parity_present = (read_c0_errctl() & ERRCTL_PE); /* probe L2 parity support */ - write_c0_ecc(errctl|ERRCTL_L2P); + write_c0_errctl(errctl|ERRCTL_L2P); back_to_back_c0_hazard(); - l2parity_present = (read_c0_ecc() & ERRCTL_L2P); + l2parity_present = (read_c0_errctl() & ERRCTL_L2P); if (l1parity_present && l2parity_present) { if (l1parity) @@ -1790,20 +1792,20 @@ static inline __init void parity_protection_init(void) printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); - write_c0_ecc(errctl); + write_c0_errctl(errctl); back_to_back_c0_hazard(); - errctl = read_c0_ecc(); + errctl = read_c0_errctl(); printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); if (l1parity_present) - printk(KERN_INFO "Cache parity protection %sabled\n", - (errctl & ERRCTL_PE) ? "en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(errctl & ERRCTL_PE)); if (l2parity_present) { if (l1parity_present && l1parity) errctl ^= ERRCTL_L2P; - printk(KERN_INFO "L2 cache parity protection %sabled\n", - (errctl & ERRCTL_L2P) ? "en" : "dis"); + pr_info("L2 cache parity protection %s\n", + str_enabled_disabled(errctl & ERRCTL_L2P)); } } break; @@ -1811,11 +1813,11 @@ static inline __init void parity_protection_init(void) case CPU_5KC: case CPU_5KE: case CPU_LOONGSON32: - write_c0_ecc(0x80000000); + write_c0_errctl(0x80000000); back_to_back_c0_hazard(); /* Set the PE bit (bit 31) in the c0_errctl register. */ - printk(KERN_INFO "Cache parity protection %sabled\n", - (read_c0_ecc() & 0x80000000) ? 
"en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(read_c0_errctl() & 0x80000000)); break; case CPU_20KC: case CPU_25KF: @@ -1886,8 +1888,8 @@ asmlinkage void do_ftlb(void) if ((cpu_has_mips_r2_r6) && (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) || ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) { - pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", - read_c0_ecc()); + pr_err("FTLB error exception, cp0_errctl=0x%08x:\n", + read_c0_errctl()); pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); reg_val = read_c0_cacheerr(); pr_err("c0_cacheerr == %08x\n", reg_val); @@ -2007,7 +2009,13 @@ unsigned long vi_handlers[64]; void reserve_exception_space(phys_addr_t addr, unsigned long size) { - memblock_reserve(addr, size); + /* + * reserve exception space on CPUs other than CPU0 + * is too late, since memblock is unavailable when APs + * up + */ + if (smp_processor_id() == 0) + memblock_reserve(addr, size); } void __init *set_except_vector(int n, void *addr) @@ -2035,13 +2043,12 @@ void __init *set_except_vector(int n, void *addr) unsigned long jump_mask = ~((1 << 28) - 1); #endif u32 *buf = (u32 *)(ebase + 0x200); - unsigned int k0 = 26; if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { uasm_i_j(&buf, handler & ~jump_mask); uasm_i_nop(&buf); } else { - UASM_i_LA(&buf, k0, handler); - uasm_i_jr(&buf, k0); + UASM_i_LA(&buf, GPR_K0, handler); + uasm_i_jr(&buf, GPR_K0); uasm_i_nop(&buf); } local_flush_icache_range(ebase + 0x200, (unsigned long)buf); @@ -2055,110 +2062,71 @@ static void do_default_vi(void) panic("Caught unexpected vectored interrupt."); } -static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) +void *set_vi_handler(int n, vi_handler_t addr) { + extern const u8 except_vec_vi[]; + extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; + extern const u8 rollback_except_vec_vi[]; unsigned long handler; unsigned long old_handler = vi_handlers[n]; int srssets = current_cpu_data.srsets; u16 *h; unsigned char *b; + const u8 *vec_start; + int ori_offset; + int handler_len; BUG_ON(!cpu_has_veic && !cpu_has_vint); if (addr == NULL) { handler = (unsigned long) do_default_vi; - srs = 0; } else handler = (unsigned long) addr; vi_handlers[n] = handler; b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); - if (srs >= srssets) - panic("Shadow register set %d not supported", srs); - if (cpu_has_veic) { if (board_bind_eic_interrupt) - board_bind_eic_interrupt(n, srs); + board_bind_eic_interrupt(n, 0); } else if (cpu_has_vint) { /* SRSMap is only defined if shadow sets are implemented */ if (srssets > 1) - change_c0_srsmap(0xf << n*4, srs << n*4); + change_c0_srsmap(0xf << n*4, 0 << n*4); } - if (srs == 0) { - /* - * If no shadow set is selected then use the default handler - * that does normal register saving and standard interrupt exit - */ - extern const u8 except_vec_vi[], except_vec_vi_lui[]; - extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; - extern const u8 rollback_except_vec_vi[]; - const u8 *vec_start = using_rollback_handler() ? - rollback_except_vec_vi : except_vec_vi; + vec_start = using_rollback_handler() ? 
rollback_except_vec_vi : + except_vec_vi; #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) - const int lui_offset = except_vec_vi_lui - vec_start + 2; - const int ori_offset = except_vec_vi_ori - vec_start + 2; + ori_offset = except_vec_vi_ori - vec_start + 2; #else - const int lui_offset = except_vec_vi_lui - vec_start; - const int ori_offset = except_vec_vi_ori - vec_start; + ori_offset = except_vec_vi_ori - vec_start; #endif - const int handler_len = except_vec_vi_end - vec_start; + handler_len = except_vec_vi_end - vec_start; - if (handler_len > VECTORSPACING) { - /* - * Sigh... panicing won't help as the console - * is probably not configured :( - */ - panic("VECTORSPACING too small"); - } - - set_handler(((unsigned long)b - ebase), vec_start, -#ifdef CONFIG_CPU_MICROMIPS - (handler_len - 1)); -#else - handler_len); -#endif - h = (u16 *)(b + lui_offset); - *h = (handler >> 16) & 0xffff; - h = (u16 *)(b + ori_offset); - *h = (handler & 0xffff); - local_flush_icache_range((unsigned long)b, - (unsigned long)(b+handler_len)); - } - else { + if (handler_len > VECTORSPACING) { /* - * In other cases jump directly to the interrupt handler. It - * is the handler's responsibility to save registers if required - * (eg hi/lo) and return from the exception using "eret". + * Sigh... panicing won't help as the console + * is probably not configured :( */ - u32 insn; + panic("VECTORSPACING too small"); + } - h = (u16 *)b; - /* j handler */ + set_handler(((unsigned long)b - ebase), vec_start, #ifdef CONFIG_CPU_MICROMIPS - insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); + (handler_len - 1)); #else - insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); + handler_len); #endif - h[0] = (insn >> 16) & 0xffff; - h[1] = insn & 0xffff; - h[2] = 0; - h[3] = 0; - local_flush_icache_range((unsigned long)b, - (unsigned long)(b+8)); - } + /* insert offset into vi_handlers[] */ + h = (u16 *)(b + ori_offset); + *h = n * sizeof(handler); + local_flush_icache_range((unsigned long)b, + (unsigned long)(b+handler_len)); return (void *)old_handler; } -void *set_vi_handler(int n, vi_handler_t addr) -{ - return set_vi_srs_handler(n, addr, 0); -} - -extern void tlb_init(void); - /* * Timer interrupt */ @@ -2332,7 +2300,7 @@ static const char panic_null_cerr[] = void set_uncached_handler(unsigned long offset, void *addr, unsigned long size) { - unsigned long uncached_ebase = CKSEG1ADDR(ebase); + unsigned long uncached_ebase = CKSEG1ADDR_OR_64BIT(__pa(ebase)); if (!addr) panic(panic_null_cerr); @@ -2384,10 +2352,13 @@ void __init trap_init(void) * EVA is special though as it allows segments to be rearranged * and to become uncached during cache error handling. */ - if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) + if (!IS_ENABLED(CONFIG_EVA) && ebase_pa < 0x20000000) ebase = CKSEG0ADDR(ebase_pa); else ebase = (unsigned long)phys_to_virt(ebase_pa); + if (ebase_pa >= 0x20000000) + pr_warn("ebase(%pa) should better be in KSeg0", + &ebase_pa); } if (cpu_has_mmips) { @@ -2418,7 +2389,7 @@ void __init trap_init(void) set_except_vector(i, handle_reserved); /* - * Copy the EJTAG debug exception vector handler code to it's final + * Copy the EJTAG debug exception vector handler code to its final * destination. 
*/ if (cpu_has_ejtag && board_ejtag_handler_setup) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index f4cf94e92ec3..db652c99b72e 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -91,6 +91,7 @@ #include <asm/inst.h> #include <asm/unaligned-emul.h> #include <asm/mmu_context.h> +#include <asm/traps.h> #include <linux/uaccess.h> #include "access-helper.h" diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index f6d40e43f108..75c9d3618f58 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -11,10 +11,10 @@ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/mman.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/timekeeper_internal.h> #include <asm/abi.h> #include <asm/mips-cps.h> @@ -24,7 +24,7 @@ #include <vdso/vsyscall.h> /* Kernel-provided data used by the VDSO. */ -static union mips_vdso_data mips_vdso_data __page_aligned_data; +static union vdso_data_store mips_vdso_data __page_aligned_data; struct vdso_data *vdso_data = mips_vdso_data.data; /* @@ -98,11 +98,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -EINTR; if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { + unsigned long unused; + /* Map delay slot emulation page */ - base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, - VM_READ | VM_EXEC | - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - 0, NULL); + base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0, &unused, + NULL); if (IS_ERR_VALUE(base)) { ret = base; goto out; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 9ff55cb80a64..2b708fac8d2c 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -61,6 +61,7 @@ SECTIONS /* read-only */ _text = .; /* Text and read-only data */ .text : { + HEAD_TEXT TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index 667bc75f6420..84124ac2d2a5 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -95,8 +95,8 @@ int vpe_run(struct vpe *v) * We don't pass the memsize here, so VPE programs need to be * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined. */ - mttgpr(7, 0); - mttgpr(6, v->ntcs); + mttgpr($7, 0); + mttgpr($6, v->ntcs); /* set up VPE1 */ /* diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index e9a0cfd02ae2..737d0d4fdcd3 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -6,9 +6,9 @@ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2013 Imagination Technologies Ltd. * - * VPE spport module for loading a MIPS SP program into VPE1. The SP + * VPE support module for loading a MIPS SP program into VPE1. The SP * environment is rather simple since there are no TLBs. It needs - * to be relocatable (or partiall linked). Initialize your stack in + * to be relocatable (or partially linked). Initialize your stack in * the startup-code. The loader looks for the symbol __start and sets * up the execution to resume from there. To load and run, simply do * a cat SP 'binary' to the /dev/vpe1 device. |
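
Note on the sync-r4k.c rewrite earlier in this diff: it replaces the old lock-step count synchronisation with the warp-detection scheme borrowed from x86's tsc_sync.c. Both CPUs repeatedly sample the counter under a raw spinlock, publish the most recent sample in last_counter, and flag any sample that appears to run backwards relative to the other CPU's. The following is a minimal user-space sketch of that idea only, assuming an ordinary POSIX/Linux build environment; CLOCK_MONOTONIC stands in for the CP0 count register, and every identifier in it (warp_check, read_counter, and so on) is invented for illustration rather than taken from the kernel patch.

/*
 * Illustrative user-space analogue of the counter-warp check added in
 * arch/mips/kernel/sync-r4k.c (itself derived from x86 tsc_sync.c).
 * Build: gcc -O2 -pthread warp.c -o warp
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_spinlock_t sync_lock;
static uint64_t last_counter;	/* most recent sample, protected by sync_lock */
static uint64_t max_warp;	/* largest backwards step observed */
static int nr_warps;

/* Stand-in for read_c0_count(): a monotonic nanosecond timestamp. */
static uint64_t read_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Both participants run this loop for ~20ms, mirroring check_counter_warp(). */
static void *warp_check(void *arg)
{
	uint64_t start = read_counter();
	uint64_t end = start + 20 * 1000 * 1000;	/* ~20ms, cf. LOOP_TIMEOUT */
	uint64_t prev, now;

	(void)arg;
	for (;;) {
		/*
		 * Take the lock, sample the counter, and record it as the
		 * latest sample regardless of which thread made it.
		 */
		pthread_spin_lock(&sync_lock);
		prev = last_counter;
		now = read_counter();
		last_counter = now;
		pthread_spin_unlock(&sync_lock);

		/*
		 * Outside the critical section: if our sample is older than
		 * the previous one (possibly taken by the other thread), the
		 * counter warped backwards.  Record how far.
		 */
		if (prev > now) {
			pthread_spin_lock(&sync_lock);
			if (prev - now > max_warp)
				max_warp = prev - now;
			nr_warps++;
			pthread_spin_unlock(&sync_lock);
		}

		if (now > end)
			break;
	}
	return NULL;
}

int main(void)
{
	pthread_t other;

	pthread_spin_init(&sync_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&other, NULL, warp_check, NULL);
	warp_check(NULL);
	pthread_join(other, NULL);

	printf("warps: %d, max warp: %llu ns\n",
	       nr_warps, (unsigned long long)max_warp);
	return 0;
}

In the kernel version the same measurement loop runs concurrently on the control CPU (kicked via smp_call_function_single() from synchronise_count_slave()) and on the freshly booted CPU. If a warp is observed, the late CPU shifts its c0_count by the measured maximum and the test is retried up to NR_LOOPS times; random back-and-forth warps abort the retries, since they indicate the counters cannot be brought into agreement.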