Diffstat (limited to 'arch/mips/kernel')
43 files changed, 878 insertions, 466 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index ecf3278a32f7..95a1e674fd67 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -3,7 +3,7 @@ # Makefile for the Linux/MIPS kernel. # -extra-y := vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds obj-y += head.o branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \ process.o prom.o ptrace.o reset.o setup.o signal.o \ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index d1b11f66f748..1e29efcba46e 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -27,6 +27,12 @@ void output_ptreg_defines(void); void output_ptreg_defines(void) { COMMENT("MIPS pt_regs offsets."); +#ifdef CONFIG_32BIT + OFFSET(PT_ARG4, pt_regs, args[4]); + OFFSET(PT_ARG5, pt_regs, args[5]); + OFFSET(PT_ARG6, pt_regs, args[6]); + OFFSET(PT_ARG7, pt_regs, args[7]); +#endif OFFSET(PT_R0, pt_regs, regs[0]); OFFSET(PT_R1, pt_regs, regs[1]); OFFSET(PT_R2, pt_regs, regs[2]); @@ -101,6 +107,7 @@ void output_thread_info_defines(void) OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_REGS, thread_info, regs); + OFFSET(TI_SYSCALL, thread_info, syscall); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); @@ -403,6 +410,9 @@ void output_cps_defines(void) { COMMENT(" MIPS CPS offsets. "); + OFFSET(CLUSTERBOOTCFG_CORECONFIG, cluster_boot_config, core_config); + DEFINE(CLUSTERBOOTCFG_SIZE, sizeof(struct cluster_boot_config)); + OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask); OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config); DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config)); diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index d39a2963b451..2a14dc4ee57e 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -103,7 +103,7 @@ void sb1480_clockevent_init(void) BUG_ON(cpu > 3); /* Only have 4 general purpose timers */ - sprintf(name, "bcm1480-counter-%d", cpu); + sprintf(name, "bcm1480-counter-%u", cpu); cd->name = name; cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index 9a47fbcd4638..de64d6bb7ba3 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c @@ -10,6 +10,7 @@ #include <linux/mc146818rtc.h> #include <linux/irq.h> +#include <asm/ds1287.h> #include <asm/time.h> int ds1287_timer_state(void) diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 368e8475870f..5f6e9e2ebbdb 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -303,13 +303,6 @@ int r4k_clockevent_init(void) if (!c0_compare_int_usable()) return -ENXIO; - /* - * With vectored interrupts things are getting platform specific. - * get_c0_compare_int is a hook to allow a platform to return the - * interrupt number of its liking. - */ - irq = get_c0_compare_int(); - cd = &per_cpu(mips_clockevent_device, cpu); cd->name = "MIPS"; @@ -320,7 +313,6 @@ int r4k_clockevent_init(void) min_delta = calculate_min_delta(); cd->rating = 300; - cd->irq = irq; cd->cpumask = cpumask_of(cpu); cd->set_next_event = mips_next_event; cd->event_handler = mips_event_handler; @@ -332,6 +324,13 @@ int r4k_clockevent_init(void) cp0_timer_irq_installed = 1; + /* + * With vectored interrupts things are getting platform specific. 
+ * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of its liking. + */ + irq = get_c0_compare_int(); + if (request_irq(irq, c0_compare_interrupt, flags, "timer", c0_compare_interrupt)) pr_err("Failed to request irq %d (timer)\n", irq); diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index e974a4954df8..c371def2302d 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -102,3 +102,4 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, return old; } } +EXPORT_SYMBOL(__cmpxchg_small); diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index f876309130ad..2ae7034a3d5c 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -19,6 +19,10 @@ #define GCR_CPC_BASE_OFS 0x0088 #define GCR_CL_COHERENCE_OFS 0x2008 #define GCR_CL_ID_OFS 0x2028 +#define CM3_GCR_Cx_ID_CLUSTER_SHF 8 +#define CM3_GCR_Cx_ID_CLUSTER_MSK (0xff << 8) +#define CM3_GCR_Cx_ID_CORENUM_SHF 0 +#define CM3_GCR_Cx_ID_CORENUM_MSK (0xff << 0) #define CPC_CL_VC_STOP_OFS 0x2020 #define CPC_CL_VC_RUN_OFS 0x2028 @@ -271,12 +275,21 @@ LEAF(mips_cps_core_init) */ LEAF(mips_cps_get_bootcfg) /* Calculate a pointer to this cores struct core_boot_config */ + PTR_LA v0, mips_cps_cluster_bootcfg + PTR_L v0, 0(v0) lw t0, GCR_CL_ID_OFS(s1) +#ifdef CONFIG_CPU_MIPSR6 + ext t1, t0, CM3_GCR_Cx_ID_CLUSTER_SHF, 8 + li t2, CLUSTERBOOTCFG_SIZE + mul t1, t1, t2 + PTR_ADDU \ + v0, v0, t1 +#endif + PTR_L v0, CLUSTERBOOTCFG_CORECONFIG(v0) + andi t0, t0, CM3_GCR_Cx_ID_CORENUM_MSK li t1, COREBOOTCFG_SIZE mul t0, t0, t1 - PTR_LA t1, mips_cps_core_bootcfg - PTR_L t1, 0(t1) - PTR_ADDU v0, t0, t1 + PTR_ADDU v0, v0, t0 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ li t9, 0 diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index bda7f193baab..af7412549e6e 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1724,12 +1724,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, + LOONGSON_CONF6_INTIMER); break; case PRID_IMP_LOONGSON_64G: __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); decode_cpucfg(c); + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, + LOONGSON_CONF6_INTIMER); break; default: panic("Unknown Loongson Processor ID!"); diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index edc4afc080fa..59eca397f297 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -66,6 +66,18 @@ static bool rdhwr_count_usable(void) return false; } +static inline __init bool count_can_be_sched_clock(void) +{ + if (IS_ENABLED(CONFIG_CPU_FREQ)) + return false; + + if (num_possible_cpus() > 1 && + !IS_ENABLED(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK)) + return false; + + return true; +} + #ifdef CONFIG_CPU_FREQ static bool __read_mostly r4k_clock_unstable; @@ -111,7 +123,8 @@ int __init init_r4k_clocksource(void) return -ENXIO; /* Calculate a somewhat reasonable rating value */ - clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; + clocksource_mips.rating = 200; + clocksource_mips.rating += clamp(mips_hpt_frequency / 10000000, 0, 99); /* * R2 onwards makes the count accessible to user 
mode so it can be used @@ -122,9 +135,8 @@ int __init init_r4k_clocksource(void) clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); -#ifndef CONFIG_CPU_FREQ - sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); -#endif + if (count_can_be_sched_clock()) + sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); return 0; } diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 7aa2c2360ff6..f0e7fe85a42a 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -318,6 +318,10 @@ void mips_set_personality_nan(struct arch_elf_state *state) t->thread.fpu.fcr31 = c->fpu_csr31; switch (state->nan_2008) { case 0: + if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) + t->thread.fpu.fcr31 &= ~FPU_CSR_NAN2008; + if (!(c->fpu_msk31 & FPU_CSR_ABS2008)) + t->thread.fpu.fcr31 &= ~FPU_CSR_ABS2008; break; case 1: if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) diff --git a/arch/mips/kernel/fpu-probe.c b/arch/mips/kernel/fpu-probe.c index e689d6a83234..6bf3f19b1c33 100644 --- a/arch/mips/kernel/fpu-probe.c +++ b/arch/mips/kernel/fpu-probe.c @@ -144,7 +144,7 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c) * IEEE 754 conformance mode to use. Affects the NaN encoding and the * ABS.fmt/NEG.fmt execution mode. */ -static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT; +static enum { STRICT, EMULATED, LEGACY, STD2008, RELAXED } ieee754 = STRICT; /* * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes @@ -160,6 +160,7 @@ static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) switch (ieee754) { case STRICT: + case EMULATED: if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | @@ -204,6 +205,10 @@ static void cpu_set_nan_2008(struct cpuinfo_mips *c) mips_use_nan_legacy = !cpu_has_nan_2008; mips_use_nan_2008 = !!cpu_has_nan_2008; break; + case EMULATED: + /* Pretend ABS2008/NAN2008 options are dynamic */ + c->fpu_msk31 &= ~(FPU_CSR_NAN2008 | FPU_CSR_ABS2008); + fallthrough; case RELAXED: mips_use_nan_legacy = true; mips_use_nan_2008 = true; @@ -226,6 +231,8 @@ static int __init ieee754_setup(char *s) return -1; else if (!strcmp(s, "strict")) ieee754 = STRICT; + else if (!strcmp(s, "emulated")) + ieee754 = EMULATED; else if (!strcmp(s, "legacy")) ieee754 = LEGACY; else if (!strcmp(s, "2008")) diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 8c401e42301c..f39e85fd58fa 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long +static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { unsigned long sp, ip, tmp; diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a572ce36a24f..08c0a01d9a29 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -104,48 +104,59 @@ handle_vcei: __FINIT - .align 5 /* 32 byte rollback region */ -LEAF(__r4k_wait) - .set push - .set noreorder - /* start of rollback region */ - LONG_L t0, TI_FLAGS($28) - nop - andi t0, _TIF_NEED_RESCHED - bnez t0, 1f - nop - nop - nop -#ifdef CONFIG_CPU_MICROMIPS - nop - nop - nop - nop -#endif + .section .cpuidle.text,"ax" + /* Align to 32 bytes for the maximum idle 
interrupt region size. */ + .align 5 +LEAF(r4k_wait) + /* Keep the ISA bit clear for calculations on local labels here. */ +0: .fill 0 + /* Start of idle interrupt region. */ + local_irq_enable + /* + * If an interrupt lands here, before going idle on the next + * instruction, we must *NOT* go idle since the interrupt could + * have set TIF_NEED_RESCHED or caused a timer to need resched. + * Fall through -- see skipover_handler below -- and have the + * idle loop take care of things. + */ +1: .fill 0 + /* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */ + .if 1b - 0b > 32 + .error "overlong idle interrupt region" + .elseif 1b - 0b > 8 + .align 4 + .endif +2: .fill 0 + .equ r4k_wait_idle_size, 2b - 0b + /* End of idle interrupt region; size has to be a power of 2. */ .set MIPS_ISA_ARCH_LEVEL_RAW +r4k_wait_insn: wait - /* end of rollback region (the region size must be power of two) */ -1: +r4k_wait_exit: + .set mips0 + local_irq_disable jr ra - nop - .set pop - END(__r4k_wait) + END(r4k_wait) + .previous - .macro BUILD_ROLLBACK_PROLOGUE handler - FEXPORT(rollback_\handler) + .macro BUILD_SKIPOVER_PROLOGUE handler + FEXPORT(skipover_\handler) .set push .set noat MFC0 k0, CP0_EPC - PTR_LA k1, __r4k_wait - ori k0, 0x1f /* 32 byte rollback region */ - xori k0, 0x1f + /* Subtract/add 2 to let the ISA bit propagate through the mask. */ + PTR_LA k1, r4k_wait_insn - 2 + ori k0, r4k_wait_idle_size - 2 + .set noreorder bne k0, k1, \handler + PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2 + .set reorder MTC0 k0, CP0_EPC .set pop .endm .align 5 -BUILD_ROLLBACK_PROLOGUE handle_int +BUILD_SKIPOVER_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) .cfi_signal_frame #ifdef CONFIG_TRACE_IRQFLAGS @@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp) * This prototype is copied to ebase + n*IntCtl.VS and patched * to invoke the handler */ -BUILD_ROLLBACK_PROLOGUE except_vec_vi +BUILD_SKIPOVER_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME docfi=1 SAVE_AT docfi=1 diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c index 8c083612df9d..027fb57d0d79 100644 --- a/arch/mips/kernel/gpio_txx9.c +++ b/arch/mips/kernel/gpio_txx9.c @@ -32,14 +32,16 @@ static void txx9_gpio_set_raw(unsigned int offset, int value) __raw_writel(val, &txx9_pioptr->dout); } -static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int txx9_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { unsigned long flags; spin_lock_irqsave(&txx9_gpio_lock, flags); txx9_gpio_set_raw(offset, value); mmiowb(); spin_unlock_irqrestore(&txx9_gpio_lock, flags); + + return 0; } static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset) @@ -68,7 +70,7 @@ static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset, static struct gpio_chip txx9_gpio_chip = { .get = txx9_gpio_get, - .set = txx9_gpio_set, + .set_rv = txx9_gpio_set, .direction_input = txx9_gpio_dir_in, .direction_output = txx9_gpio_dir_out, .label = "TXx9", diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index b825ed4476c7..d99ed58b7043 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -59,6 +59,8 @@ #endif .endm + __HEAD + #ifndef CONFIG_NO_EXCEPT_FILL /* * Reserved space for exception handlers. 
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 5abc8b7340f8..80e8a04a642e 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void) write_c0_conf(cfg | R30XX_CONF_HALT); } -void __cpuidle r4k_wait(void) -{ - raw_local_irq_enable(); - __r4k_wait(); - raw_local_irq_disable(); -} - /* * This variant is preferable as it allows testing need_resched and going to * sleep depending on the outcome atomically. Unfortunately the "It is diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 3a115fab5573..43cb1e20baed 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -5,6 +5,7 @@ */ #include <linux/errno.h> +#include <linux/of.h> #include <linux/percpu.h> #include <linux/spinlock.h> @@ -14,6 +15,7 @@ void __iomem *mips_gcr_base; void __iomem *mips_cm_l2sync_base; int mips_cm_is64; +bool mips_cm_is_l2_hci_broken; static char *cm2_tr[8] = { "mem", "gcr", "gic", "mmio", @@ -237,6 +239,18 @@ static void mips_cm_probe_l2sync(void) mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE); } +void mips_cm_update_property(void) +{ + struct device_node *cm_node; + + cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm"); + if (!cm_node) + return; + pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken"); + mips_cm_is_l2_hci_broken = true; + of_node_put(cm_node); +} + int mips_cm_probe(void) { phys_addr_t addr; @@ -308,7 +322,9 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp); if (cm_rev >= CM_REV_CM3_5) { - val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + if (cluster != cpu_cluster(¤t_cpu_data)) + val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + val |= CM_GCR_Cx_OTHER_GIC_EN; val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster); val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block); } else { @@ -512,3 +528,40 @@ void mips_cm_error_report(void) /* reprime cause register */ write_gcr_error_cause(cm_error); } + +unsigned int mips_cps_first_online_in_cluster(void) +{ + unsigned int local_cl; + int i; + + local_cl = cpu_cluster(¤t_cpu_data); + + /* + * We rely upon knowledge that CPUs are numbered sequentially by + * cluster - ie. CPUs 0..X will be in cluster 0, CPUs X+1..Y in cluster + * 1, CPUs Y+1..Z in cluster 2 etc. This means that CPUs in the same + * cluster will immediately precede or follow one another. + * + * First we scan backwards, until we find an online CPU in the cluster + * or we move on to another cluster. + */ + for (i = smp_processor_id() - 1; i >= 0; i--) { + if (cpu_cluster(&cpu_data[i]) != local_cl) + break; + if (!cpu_online(i)) + continue; + return false; + } + + /* Then do the same for higher numbered CPUs */ + for (i = smp_processor_id() + 1; i < nr_cpu_ids; i++) { + if (cpu_cluster(&cpu_data[i]) != local_cl) + break; + if (!cpu_online(i)) + continue; + return false; + } + + /* We found no online CPUs in the local cluster */ + return true; +} diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index c938ba208fc0..2ef610650a9e 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -43,83 +43,6 @@ static int __init maxtcs(char *str) __setup("maxtcs=", maxtcs); -/* - * Dump new MIPS MT state for the core. Does not leave TCs halted. - * Takes an argument which taken to be a pre-call MVPControl value. 
- */ - -void mips_mt_regdump(unsigned long mvpctl) -{ - unsigned long flags; - unsigned long vpflags; - unsigned long mvpconf0; - int nvpe; - int ntc; - int i; - int tc; - unsigned long haltval; - unsigned long tcstatval; - - local_irq_save(flags); - vpflags = dvpe(); - printk("=== MIPS MT State Dump ===\n"); - printk("-- Global State --\n"); - printk(" MVPControl Passed: %08lx\n", mvpctl); - printk(" MVPControl Read: %08lx\n", vpflags); - printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); - nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - printk("-- per-VPE State --\n"); - for (i = 0; i < nvpe; i++) { - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { - printk(" VPE %d\n", i); - printk(" VPEControl : %08lx\n", - read_vpe_c0_vpecontrol()); - printk(" VPEConf0 : %08lx\n", - read_vpe_c0_vpeconf0()); - printk(" VPE%d.Status : %08lx\n", - i, read_vpe_c0_status()); - printk(" VPE%d.EPC : %08lx %pS\n", - i, read_vpe_c0_epc(), - (void *) read_vpe_c0_epc()); - printk(" VPE%d.Cause : %08lx\n", - i, read_vpe_c0_cause()); - printk(" VPE%d.Config7 : %08lx\n", - i, read_vpe_c0_config7()); - break; /* Next VPE */ - } - } - } - printk("-- per-TC State --\n"); - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if (read_tc_c0_tcbind() == read_c0_tcbind()) { - /* Are we dumping ourself? */ - haltval = 0; /* Then we're not halted, and mustn't be */ - tcstatval = flags; /* And pre-dump TCStatus is flags */ - printk(" TC %d (current TC with VPE EPC above)\n", tc); - } else { - haltval = read_tc_c0_tchalt(); - write_tc_c0_tchalt(1); - tcstatval = read_tc_c0_tcstatus(); - printk(" TC %d\n", tc); - } - printk(" TCStatus : %08lx\n", tcstatval); - printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); - printk(" TCRestart : %08lx %pS\n", - read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart()); - printk(" TCHalt : %08lx\n", haltval); - printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); - if (!haltval) - write_tc_c0_tchalt(0); - } - printk("===========================\n"); - evpe(vpflags); - local_irq_restore(flags); -} - static int mt_opt_rpsctl = -1; static int mt_opt_nblsu = -1; static int mt_opt_forceconfig7; @@ -199,9 +122,8 @@ void mips_mt_set_cpuoptions(void) unsigned long ectlval; unsigned long itcblkgrn; - /* ErrCtl register is known as "ecc" to Linux */ - ectlval = read_c0_ecc(); - write_c0_ecc(ectlval | (0x1 << 26)); + ectlval = read_c0_errctl(); + write_c0_errctl(ectlval | (0x1 << 26)); ehb(); #define INDEX_0 (0x80000000) #define INDEX_8 (0x80000008) @@ -222,7 +144,7 @@ void mips_mt_set_cpuoptions(void) ehb(); /* Write out to ITU with CACHE op */ cache_op(Index_Store_Tag_D, INDEX_0); - write_c0_ecc(ectlval); + write_c0_errctl(ectlval); ehb(); printk("Mapped %ld ITC cells starting at 0x%08x\n", ((itcblkgrn & 0x7fe00000) >> 20), itc_base); diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 7b2fbaa9cac5..ba0f62d8eff5 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -13,7 +13,6 @@ #include <linux/elf.h> #include <linux/mm.h> #include <linux/numa.h> -#include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> @@ -31,15 +30,6 @@ struct mips_hi16 { static LIST_HEAD(dbe_list); static DEFINE_SPINLOCK(dbe_lock); -#ifdef MODULE_START -void *module_alloc(unsigned long size) -{ - return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, - GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, - 
__builtin_return_address(0)); -} -#endif - static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v) { *location = base + v; diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index c4d6b09136b1..196a070349b0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -791,8 +791,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, if (!mipspmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - mipsxx_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index d09ca77e624d..3de0e05e0511 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -57,10 +57,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); -/* - * Used to synchronize entry to deep idle states. Actually per-core rather - * than per-CPU. - */ +/* Used to synchronize entry to deep idle states */ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); /* Saved CPU state across the CPS_PM_POWER_GATED state */ @@ -104,17 +101,20 @@ static void coupled_barrier(atomic_t *a, unsigned online) int cps_pm_enter_state(enum cps_pm_state state) { unsigned cpu = smp_processor_id(); + unsigned int cluster = cpu_cluster(¤t_cpu_data); unsigned core = cpu_core(¤t_cpu_data); unsigned online, left; cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); u32 *core_ready_count, *nc_core_ready_count; void *nc_addr; cps_nc_entry_fn entry; + struct cluster_boot_config *cluster_cfg; struct core_boot_config *core_cfg; struct vpe_boot_config *vpe_cfg; + atomic_t *barrier; /* Check that there is an entry function for this state */ - entry = per_cpu(nc_asm_enter, core)[state]; + entry = per_cpu(nc_asm_enter, cpu)[state]; if (!entry) return -EINVAL; @@ -138,7 +138,8 @@ int cps_pm_enter_state(enum cps_pm_state state) if (!mips_cps_smp_in_use()) return -EINVAL; - core_cfg = &mips_cps_core_bootcfg[core]; + cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; + core_cfg = &cluster_cfg->core_config[core]; vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(¤t_cpu_data)]; vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; vpe_cfg->gp = (unsigned long)current_thread_info(); @@ -150,7 +151,7 @@ int cps_pm_enter_state(enum cps_pm_state state) smp_mb__after_atomic(); /* Create a non-coherent mapping of the core ready_count */ - core_ready_count = per_cpu(ready_count, core); + core_ready_count = per_cpu(ready_count, cpu); nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), (unsigned long)core_ready_count); nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); @@ -158,7 +159,8 @@ int cps_pm_enter_state(enum cps_pm_state state) /* Ensure ready_count is zero-initialised before the assembly runs */ WRITE_ONCE(*nc_core_ready_count, 0); - coupled_barrier(&per_cpu(pm_barrier, core), online); + barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu])); + coupled_barrier(barrier, online); /* Run the generated entry code */ left = entry(online, nc_core_ready_count); @@ -629,12 +631,14 @@ out_err: static int cps_pm_online_cpu(unsigned int cpu) { - enum cps_pm_state state; - unsigned core = cpu_core(&cpu_data[cpu]); + unsigned int sibling, core; void *entry_fn, *core_rc; + enum cps_pm_state state; + + core = cpu_core(&cpu_data[cpu]); for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { - if 
(per_cpu(nc_asm_enter, core)[state]) + if (per_cpu(nc_asm_enter, cpu)[state]) continue; if (!test_bit(state, state_support)) continue; @@ -646,16 +650,19 @@ static int cps_pm_online_cpu(unsigned int cpu) clear_bit(state, state_support); } - per_cpu(nc_asm_enter, core)[state] = entry_fn; + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(nc_asm_enter, sibling)[state] = entry_fn; } - if (!per_cpu(ready_count, core)) { + if (!per_cpu(ready_count, cpu)) { core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count, core) = core_rc; + + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(ready_count, sibling) = core_rc; } return 0; diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 8eba5a1ed664..8f0a0001540c 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -66,24 +66,23 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", cpu_data[n].udelay_val / (500000/HZ), (cpu_data[n].udelay_val / (5000/HZ)) % 100); - seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); + seq_printf(m, "wait instruction\t: %s\n", str_yes_no(cpu_wait)); seq_printf(m, "microsecond timers\t: %s\n", - cpu_has_counter ? "yes" : "no"); + str_yes_no(cpu_has_counter)); seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); seq_printf(m, "extra interrupt vector\t: %s\n", - cpu_has_divec ? "yes" : "no"); - seq_printf(m, "hardware watchpoint\t: %s", - cpu_has_watch ? "yes, " : "no\n"); + str_yes_no(cpu_has_divec)); + seq_printf(m, "hardware watchpoint\t: %s", str_yes_no(cpu_has_watch)); if (cpu_has_watch) { - seq_printf(m, "count: %d, address/irw mask: [", + seq_printf(m, ", count: %d, address/irw mask: [", cpu_data[n].watch_reg_count); for (i = 0; i < cpu_data[n].watch_reg_count; i++) seq_printf(m, "%s0x%04x", i ? ", " : "", cpu_data[n].watch_reg_masks[i]); - seq_puts(m, "]\n"); + seq_puts(m, "]"); } - seq_puts(m, "isa\t\t\t:"); + seq_puts(m, "\nisa\t\t\t:"); if (cpu_has_mips_1) seq_puts(m, " mips1"); if (cpu_has_mips_2) @@ -155,7 +154,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mmips) { seq_printf(m, "micromips kernel\t: %s\n", - (read_c0_config3() & MIPS_CONF3_ISA_OE) ? 
"yes" : "no"); + str_yes_no(read_c0_config3() & MIPS_CONF3_ISA_OE)); } seq_puts(m, "Options implemented\t:"); diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 6062e6fa589a..4fd6da0a06c3 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -41,7 +41,7 @@ char *mips_get_machine_name(void) void __init __dt_setup_arch(void *bph) { - if (!early_init_dt_scan(bph)) + if (!early_init_dt_scan(bph, __pa(bph))) return; mips_set_machine_name(of_flat_dt_get_machine_name()); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 59288c13b581..b890d64d352c 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -922,11 +922,13 @@ static const struct pt_regs_offset regoffset_table[] = { */ int regs_query_register_offset(const char *name) { - const struct pt_regs_offset *roff; - for (roff = regoffset_table; roff->name != NULL; roff++) - if (!strcmp(roff->name, name)) - return roff->offset; - return -EINVAL; + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + + return -EINVAL; } #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) @@ -937,7 +939,7 @@ static const struct user_regset mips_regsets[] = { .n = ELF_NGREG, .size = sizeof(unsigned int), .align = sizeof(unsigned int), - .regset_get = gpr32_get, + .regset_get = gpr32_get, .set = gpr32_set, }, [REGSET_DSP] = { @@ -945,7 +947,7 @@ static const struct user_regset mips_regsets[] = { .n = NUM_DSP_REGS + 1, .size = sizeof(u32), .align = sizeof(u32), - .regset_get = dsp32_get, + .regset_get = dsp32_get, .set = dsp32_set, .active = dsp_active, }, @@ -955,7 +957,7 @@ static const struct user_regset mips_regsets[] = { .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), - .regset_get = fpr_get, + .regset_get = fpr_get, .set = fpr_set, }, [REGSET_FP_MODE] = { @@ -963,7 +965,7 @@ static const struct user_regset mips_regsets[] = { .n = 1, .size = sizeof(int), .align = sizeof(int), - .regset_get = fp_mode_get, + .regset_get = fp_mode_get, .set = fp_mode_set, }, #endif @@ -973,7 +975,7 @@ static const struct user_regset mips_regsets[] = { .n = NUM_FPU_REGS + 1, .size = 16, .align = 16, - .regset_get = msa_get, + .regset_get = msa_get, .set = msa_set, }, #endif @@ -997,7 +999,7 @@ static const struct user_regset mips64_regsets[] = { .n = ELF_NGREG, .size = sizeof(unsigned long), .align = sizeof(unsigned long), - .regset_get = gpr64_get, + .regset_get = gpr64_get, .set = gpr64_set, }, [REGSET_DSP] = { @@ -1005,7 +1007,7 @@ static const struct user_regset mips64_regsets[] = { .n = NUM_DSP_REGS + 1, .size = sizeof(u64), .align = sizeof(u64), - .regset_get = dsp64_get, + .regset_get = dsp64_get, .set = dsp64_set, .active = dsp_active, }, @@ -1015,7 +1017,7 @@ static const struct user_regset mips64_regsets[] = { .n = 1, .size = sizeof(int), .align = sizeof(int), - .regset_get = fp_mode_get, + .regset_get = fp_mode_get, .set = fp_mode_set, }, [REGSET_FPR] = { @@ -1023,7 +1025,7 @@ static const struct user_regset mips64_regsets[] = { .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), - .regset_get = fpr_get, + .regset_get = fpr_get, .set = fpr_set, }, #endif @@ -1033,7 +1035,7 @@ static const struct user_regset mips64_regsets[] = { .n = NUM_FPU_REGS + 1, .size = 16, .align = 16, - .regset_get = msa_get, + .regset_get = msa_get, .set = msa_set, }, #endif @@ -1317,51 +1319,32 @@ long arch_ptrace(struct task_struct *child, long request, * Notification 
of system call entry/exit * - triggered by current->work.syscall_trace */ -asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) +asmlinkage long syscall_trace_enter(struct pt_regs *regs) { user_exit(); - current_thread_info()->syscall = syscall; - if (test_thread_flag(TIF_SYSCALL_TRACE)) { if (ptrace_report_syscall_entry(regs)) return -1; - syscall = current_thread_info()->syscall; } -#ifdef CONFIG_SECCOMP - if (unlikely(test_thread_flag(TIF_SECCOMP))) { - int ret, i; - struct seccomp_data sd; - unsigned long args[6]; - - sd.nr = syscall; - sd.arch = syscall_get_arch(current); - syscall_get_arguments(current, regs, args); - for (i = 0; i < 6; i++) - sd.args[i] = args[i]; - sd.instruction_pointer = KSTK_EIP(current); - - ret = __secure_computing(&sd); - if (ret == -1) - return ret; - syscall = current_thread_info()->syscall; - } -#endif + if (secure_computing()) + return -1; if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[2]); - audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], + audit_syscall_entry(current_thread_info()->syscall, + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); /* * Negative syscall numbers are mistaken for rejected syscalls, but * won't have had the return value set appropriately, so we do so now. */ - if (syscall < 0) + if (current_thread_info()->syscall < 0) syscall_set_return_value(current, regs, -ENOSYS, 0); - return syscall; + return current_thread_info()->syscall; } /* @@ -1370,7 +1353,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) */ asmlinkage void syscall_trace_leave(struct pt_regs *regs) { - /* + /* * We may come here right after calling schedule_user() * or do_notify_resume(), in which case we can be in RCU * user mode. diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c index 7eeeaf1ff95d..cda7983e7c18 100644 --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -337,7 +337,7 @@ void *__init relocate_kernel(void) #if defined(CONFIG_USE_OF) /* Deal with the device tree */ fdt = plat_get_fdt(); - early_init_dt_scan(fdt); + early_init_dt_scan(fdt, __pa(fdt)); if (boot_command_line[0]) { /* Boot command line was passed in device tree */ strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 18dc9b345056..4947a4f39e37 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -64,10 +64,10 @@ load_a6: user_lw(t7, 24(t0)) # argument #7 from usp load_a7: user_lw(t8, 28(t0)) # argument #8 from usp loads_done: - sw t5, 16(sp) # argument #5 to ksp - sw t6, 20(sp) # argument #6 to ksp - sw t7, 24(sp) # argument #7 to ksp - sw t8, 28(sp) # argument #8 to ksp + sw t5, PT_ARG4(sp) # argument #5 to ksp + sw t6, PT_ARG5(sp) # argument #6 to ksp + sw t7, PT_ARG6(sp) # argument #7 to ksp + sw t8, PT_ARG7(sp) # argument #8 to ksp .set pop .section __ex_table,"a" @@ -77,6 +77,18 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + */ + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + lw t0, TI_FLAGS($28) # syscall tracing enabled? 
li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 @@ -114,16 +126,7 @@ syscall_trace_entry: SAVE_STATIC move a0, sp - /* - * syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - */ - move a1, v0 - subu t2, v0, __NR_O32_Linux - bnez t2, 1f /* __NR_syscall at offset 0 */ - lw a1, PT_R4(sp) - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 97456b2ca7dc..97788859238c 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -72,7 +74,6 @@ syscall_common: n32_syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index e6264aa62e45..be11ea5cc67e 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -82,7 +84,6 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index d3c2616cba22..7a5abb73e531 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -79,6 +79,22 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * absolute syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + * note: NR_syscall is the first O32 syscall but the macro is + * only defined when compiling with -mabi=32 (CONFIG_32BIT) + * therefore __NR_O32_Linux is used (4000) + */ + + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -113,22 +129,7 @@ trace_a_syscall: sd a7, PT_R11(sp) # For indirect syscalls move a0, sp - /* - * absolute syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - * note: NR_syscall is the first O32 syscall but the macro is - * only defined when compiling with -mabi=32 (CONFIG_32BIT) - * therefore __NR_O32_Linux is used (4000) - */ - .set push - .set reorder - subu t1, v0, __NR_O32_Linux - move a1, v0 - bnez t1, 1f /* __NR_syscall at offset 0 */ - ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ - .set pop - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? 
Skip syscall diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 12a1a4ffb602..fbfe0771317e 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -704,10 +704,7 @@ static void __init resource_init(void) for_each_mem_range(i, &start, &end) { struct resource *res; - res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct resource)); + res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES); res->start = start; /* diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index b3dbf9ecb0d6..35b8d810833c 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -518,7 +518,7 @@ static void bmips_set_reset_vec(int cpu, u32 val) info.val = val; bmips_set_reset_vec_remote(&info); } else { - void __iomem *cbr = BMIPS_GET_CBR(); + void __iomem *cbr = bmips_cbr_addr; if (cpu == 0) __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0); @@ -591,7 +591,8 @@ asmlinkage void __weak plat_wired_tlb_setup(void) void bmips_cpu_setup(void) { - void __iomem __maybe_unused *cbr = BMIPS_GET_CBR(); + void __iomem __maybe_unused *cbr = bmips_cbr_addr; + u32 __maybe_unused rac_addr; u32 __maybe_unused cfg; switch (current_cpu_type()) { @@ -620,6 +621,23 @@ void bmips_cpu_setup(void) __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE); break; + case CPU_BMIPS4350: + rac_addr = BMIPS_RAC_CONFIG_1; + + if (!(read_c0_brcm_cmt_local() & (1 << 31))) + rac_addr = BMIPS_RAC_CONFIG; + + /* Enable data RAC */ + cfg = __raw_readl(cbr + rac_addr); + __raw_writel(cfg | 0xf, cbr + rac_addr); + __raw_readl(cbr + rac_addr); + + /* Flush stale data out of the readahead cache */ + cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); + __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG); + __raw_readl(cbr + BMIPS_RAC_CONFIG); + break; + case CPU_BMIPS4380: /* CBG workaround for early BMIPS4380 CPUs */ switch (read_c0_prid()) { diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 9cc087dd1c19..7b0e69af4097 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -36,11 +36,55 @@ enum label_id { UASM_L_LA(_not_nmi) -static DECLARE_BITMAP(core_power, NR_CPUS); -static uint32_t core_entry_reg; +static u64 core_entry_reg; static phys_addr_t cps_vec_pa; -struct core_boot_config *mips_cps_core_bootcfg; +struct cluster_boot_config *mips_cps_cluster_bootcfg; + +static void power_up_other_cluster(unsigned int cluster) +{ + u32 stat, seq_state; + unsigned int timeout; + + mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, + CM_GCR_Cx_OTHER_BLOCK_LOCAL); + stat = read_cpc_co_stat_conf(); + mips_cm_unlock_other(); + + seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE; + seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); + if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5) + return; + + /* Set endianness & power up the CM */ + mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)); + write_cpc_redir_pwrup_ctl(1); + mips_cm_unlock_other(); + + /* Wait for the CM to start up */ + timeout = 1000; + mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, + CM_GCR_Cx_OTHER_BLOCK_LOCAL); + while (1) { + stat = read_cpc_co_stat_conf(); + seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE; + seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); + if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5) + break; + + if (timeout) { + mdelay(1); + timeout--; + } else { + pr_warn("Waiting for cluster %u CM to power 
up... STAT_CONF=0x%x\n", + cluster, stat); + mdelay(1000); + } + } + + mips_cm_unlock_other(); +} static unsigned __init core_vpe_count(unsigned int cluster, unsigned core) { @@ -94,6 +138,20 @@ static void __init *mips_cps_build_core_entry(void *addr) return p; } +static bool __init check_64bit_reset(void) +{ + bool cx_64bit_reset = false; + + mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE); + if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) == + CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) + cx_64bit_reset = true; + mips_cm_unlock_other(); + + return cx_64bit_reset; +} + static int __init allocate_cps_vecs(void) { /* Try to allocate in KSEG1 first */ @@ -105,11 +163,23 @@ static int __init allocate_cps_vecs(void) CM_GCR_Cx_RESET_BASE_BEVEXCBASE; if (!cps_vec_pa && mips_cm_is64) { - cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, - 0x0, SZ_4G - 1); - if (cps_vec_pa) - core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) | + phys_addr_t end; + + if (check_64bit_reset()) { + pr_info("VP Local Reset Exception Base support 47 bits address\n"); + end = MEMBLOCK_ALLOC_ANYWHERE; + } else { + end = SZ_4G - 1; + } + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end); + if (cps_vec_pa) { + if (check_64bit_reset()) + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) | + CM_GCR_Cx_RESET_BASE_MODE; + else + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) | CM_GCR_Cx_RESET_BASE_MODE; + } } if (!cps_vec_pa) @@ -152,6 +222,9 @@ static void __init cps_smp_setup(void) pr_cont(","); pr_cont("{"); + if (mips_cm_revision() >= CM_REV_CM3_5) + power_up_other_cluster(cl); + ncores = mips_cps_numcores(cl); for (c = 0; c < ncores; c++) { core_vpes = core_vpe_count(cl, c); @@ -163,6 +236,7 @@ static void __init cps_smp_setup(void) /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ if (!cl && !c) smp_num_siblings = core_vpes; + cpumask_set_cpu(nvpes, &__cpu_primary_thread_mask); for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { cpu_set_cluster(&cpu_data[nvpes + v], cl); @@ -179,8 +253,8 @@ static void __init cps_smp_setup(void) /* Indicate present CPUs (CPU being synonymous with VPE) */ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) { - set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0); - set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0); + set_cpu_possible(v, true); + set_cpu_present(v, true); __cpu_number_map[v] = v; __cpu_logical_map[v] = v; } @@ -188,9 +262,6 @@ static void __init cps_smp_setup(void) /* Set a coherent default CCA (CWB) */ change_c0_config(CONF_CM_CMASK, 0x5); - /* Core 0 is powered up (we're running on it) */ - bitmap_set(core_power, 0, 1); - /* Initialise core 0 */ mips_cps_core_init(); @@ -212,8 +283,10 @@ static void __init cps_smp_setup(void) static void __init cps_prepare_cpus(unsigned int max_cpus) { - unsigned ncores, core_vpes, c, cca; + unsigned int nclusters, ncores, core_vpes, c, cl, cca; bool cca_unsuitable, cores_limited; + struct cluster_boot_config *cluster_bootcfg; + struct core_boot_config *core_bootcfg; mips_mt_set_cpuoptions(); @@ -255,40 +328,67 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) setup_cps_vecs(); - /* Allocate core boot configuration structs */ - ncores = mips_cps_numcores(0); - mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), - GFP_KERNEL); - if (!mips_cps_core_bootcfg) { - pr_err("Failed to allocate boot config for 
%u cores\n", ncores); + /* Allocate cluster boot configuration structs */ + nclusters = mips_cps_numclusters(); + mips_cps_cluster_bootcfg = kcalloc(nclusters, + sizeof(*mips_cps_cluster_bootcfg), + GFP_KERNEL); + if (!mips_cps_cluster_bootcfg) goto err_out; - } - /* Allocate VPE boot configuration structs */ - for (c = 0; c < ncores; c++) { - core_vpes = core_vpe_count(0, c); - mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, - sizeof(*mips_cps_core_bootcfg[c].vpe_config), + if (nclusters > 1) + mips_cm_update_property(); + + for (cl = 0; cl < nclusters; cl++) { + /* Allocate core boot configuration structs */ + ncores = mips_cps_numcores(cl); + core_bootcfg = kcalloc(ncores, sizeof(*core_bootcfg), + GFP_KERNEL); + if (!core_bootcfg) + goto err_out; + mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg; + + mips_cps_cluster_bootcfg[cl].core_power = + kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long), GFP_KERNEL); - if (!mips_cps_core_bootcfg[c].vpe_config) { - pr_err("Failed to allocate %u VPE boot configs\n", - core_vpes); + if (!mips_cps_cluster_bootcfg[cl].core_power) goto err_out; + + /* Allocate VPE boot configuration structs */ + for (c = 0; c < ncores; c++) { + core_vpes = core_vpe_count(cl, c); + core_bootcfg[c].vpe_config = kcalloc(core_vpes, + sizeof(*core_bootcfg[c].vpe_config), + GFP_KERNEL); + if (!core_bootcfg[c].vpe_config) + goto err_out; } } - /* Mark this CPU as booted */ - atomic_set(&mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)].vpe_mask, - 1 << cpu_vpe_id(¤t_cpu_data)); + /* Mark this CPU as powered up & booted */ + cl = cpu_cluster(¤t_cpu_data); + c = cpu_core(¤t_cpu_data); + cluster_bootcfg = &mips_cps_cluster_bootcfg[cl]; + cpu_smt_set_num_threads(core_vpes, core_vpes); + core_bootcfg = &cluster_bootcfg->core_config[c]; + bitmap_set(cluster_bootcfg->core_power, cpu_core(¤t_cpu_data), 1); + atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(¤t_cpu_data)); return; err_out: /* Clean up allocations */ - if (mips_cps_core_bootcfg) { - for (c = 0; c < ncores; c++) - kfree(mips_cps_core_bootcfg[c].vpe_config); - kfree(mips_cps_core_bootcfg); - mips_cps_core_bootcfg = NULL; + if (mips_cps_cluster_bootcfg) { + for (cl = 0; cl < nclusters; cl++) { + cluster_bootcfg = &mips_cps_cluster_bootcfg[cl]; + ncores = mips_cps_numcores(cl); + for (c = 0; c < ncores; c++) { + core_bootcfg = &cluster_bootcfg->core_config[c]; + kfree(core_bootcfg->vpe_config); + } + kfree(mips_cps_cluster_bootcfg[c].core_config); + } + kfree(mips_cps_cluster_bootcfg); + mips_cps_cluster_bootcfg = NULL; } /* Effectively disable SMP by declaring CPUs not present */ @@ -299,16 +399,124 @@ err_out: } } -static void boot_core(unsigned int core, unsigned int vpe_id) +static void init_cluster_l2(void) { - u32 stat, seq_state; - unsigned timeout; + u32 l2_cfg, l2sm_cop, result; + + while (!mips_cm_is_l2_hci_broken) { + l2_cfg = read_gcr_redir_l2_ram_config(); + + /* If HCI is not supported, use the state machine below */ + if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT)) + break; + if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED)) + break; + + /* If the HCI_DONE bit is set, we're finished */ + if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE) + return; + } + + l2sm_cop = read_gcr_redir_l2sm_cop(); + if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT), + "L2 init not supported on this system yet")) + return; + + /* Clear L2 tag registers */ + write_gcr_redir_l2_tag_state(0); + write_gcr_redir_l2_ecc(0); + + /* Ensure the L2 tag writes complete before the state machine starts */ + mb(); + + /* Wait for the L2 
state machine to be idle */ + do { + l2sm_cop = read_gcr_redir_l2sm_cop(); + } while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING); + + /* Start a store tag operation */ + l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG; + l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE); + l2sm_cop |= CM_GCR_L2SM_COP_CMD_START; + write_gcr_redir_l2sm_cop(l2sm_cop); + + /* Ensure the state machine starts before we poll for completion */ + mb(); + + /* Wait for the operation to be complete */ + do { + l2sm_cop = read_gcr_redir_l2sm_cop(); + result = l2sm_cop & CM_GCR_L2SM_COP_RESULT; + result >>= __ffs(CM_GCR_L2SM_COP_RESULT); + } while (!result); + + WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK, + "L2 state machine failed cache init with error %u\n", result); +} + +static void boot_core(unsigned int cluster, unsigned int core, + unsigned int vpe_id) +{ + struct cluster_boot_config *cluster_cfg; + u32 access, stat, seq_state; + unsigned int timeout, ncores; + + cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; + ncores = mips_cps_numcores(cluster); + + if ((cluster != cpu_cluster(¤t_cpu_data)) && + bitmap_empty(cluster_cfg->core_power, ncores)) { + power_up_other_cluster(cluster); + + mips_cm_lock_other(cluster, core, 0, + CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + + /* Ensure cluster GCRs are where we expect */ + write_gcr_redir_base(read_gcr_base()); + write_gcr_redir_cpc_base(read_gcr_cpc_base()); + write_gcr_redir_gic_base(read_gcr_gic_base()); + + init_cluster_l2(); + + /* Mirror L2 configuration */ + write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base()); + write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control()); + write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b()); + + /* Mirror ECC/parity setup */ + write_gcr_redir_err_control(read_gcr_err_control()); + + /* Set BEV base */ + write_gcr_redir_bev_base(core_entry_reg); + + mips_cm_unlock_other(); + } + + if (cluster != cpu_cluster(¤t_cpu_data)) { + mips_cm_lock_other(cluster, core, 0, + CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + + /* Ensure the core can access the GCRs */ + access = read_gcr_redir_access(); + access |= BIT(core); + write_gcr_redir_access(access); + + mips_cm_unlock_other(); + } else { + /* Ensure the core can access the GCRs */ + access = read_gcr_access(); + access |= BIT(core); + write_gcr_access(access); + } /* Select the appropriate core */ - mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* Set its reset vector */ - write_gcr_co_reset_base(core_entry_reg); + if (mips_cm_is64) + write_gcr_co_reset64_base(core_entry_reg); + else + write_gcr_co_reset_base(core_entry_reg); /* Ensure its coherency is disabled */ write_gcr_co_coherence(0); @@ -317,7 +525,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id) write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB); /* Ensure the core can access the GCRs */ - set_gcr_access(1 << core); + if (mips_cm_revision() < CM_REV_CM3) + set_gcr_access(1 << core); + else + set_gcr_access_cm3(1 << core); if (mips_cpc_present()) { /* Reset the core */ @@ -368,30 +579,42 @@ static void boot_core(unsigned int core, unsigned int vpe_id) mips_cm_unlock_other(); /* The core is now powered up */ - bitmap_set(core_power, core, 1); + bitmap_set(cluster_cfg->core_power, core, 1); + + /* + * Restore CM_PWRUP=0 so that the CM can power down if all the cores in + * the cluster do (eg. if they're all removed via hotplug. 
+ */ + if (mips_cm_revision() >= CM_REV_CM3_5) { + mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + write_cpc_redir_pwrup_ctl(0); + mips_cm_unlock_other(); + } } static void remote_vpe_boot(void *dummy) { + unsigned int cluster = cpu_cluster(¤t_cpu_data); unsigned core = cpu_core(¤t_cpu_data); - struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; + struct cluster_boot_config *cluster_cfg = + &mips_cps_cluster_bootcfg[cluster]; + struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; mips_cps_boot_vpes(core_cfg, cpu_vpe_id(¤t_cpu_data)); } static int cps_boot_secondary(int cpu, struct task_struct *idle) { + unsigned int cluster = cpu_cluster(&cpu_data[cpu]); unsigned core = cpu_core(&cpu_data[cpu]); unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); - struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; + struct cluster_boot_config *cluster_cfg = + &mips_cps_cluster_bootcfg[cluster]; + struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; unsigned int remote; int err; - /* We don't yet support booting CPUs in other clusters */ - if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data)) - return -ENOSYS; - vpe_cfg->pc = (unsigned long)&smp_bootstrap; vpe_cfg->sp = __KSTK_TOS(idle); vpe_cfg->gp = (unsigned long)task_thread_info(idle); @@ -400,15 +623,19 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle) preempt_disable(); - if (!test_bit(core, core_power)) { + if (!test_bit(core, cluster_cfg->core_power)) { /* Boot a VPE on a powered down core */ - boot_core(core, vpe_id); + boot_core(cluster, core, vpe_id); goto out; } if (cpu_has_vp) { - mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); - write_gcr_co_reset_base(core_entry_reg); + mips_cm_lock_other(cluster, core, vpe_id, + CM_GCR_Cx_OTHER_BLOCK_LOCAL); + if (mips_cm_is64) + write_gcr_co_reset64_base(core_entry_reg); + else + write_gcr_co_reset_base(core_entry_reg); mips_cm_unlock_other(); } @@ -541,12 +768,14 @@ static void cps_kexec_nonboot_cpu(void) static int cps_cpu_disable(void) { unsigned cpu = smp_processor_id(); + struct cluster_boot_config *cluster_cfg; struct core_boot_config *core_cfg; if (!cps_pm_support_state(CPS_PM_POWER_GATED)) return -EINVAL; - core_cfg = &mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)]; + cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(¤t_cpu_data)]; + core_cfg = &cluster_cfg->core_config[cpu_core(¤t_cpu_data)]; atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); smp_mb__after_atomic(); set_cpu_online(cpu, false); @@ -612,11 +841,15 @@ static void cps_cpu_die(unsigned int cpu) { } static void cps_cleanup_dead_cpu(unsigned cpu) { + unsigned int cluster = cpu_cluster(&cpu_data[cpu]); unsigned core = cpu_core(&cpu_data[cpu]); unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); ktime_t fail_time; unsigned stat; int err; + struct cluster_boot_config *cluster_cfg; + + cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; /* * Now wait for the CPU to actually offline. 
Without doing this that @@ -668,7 +901,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu) } while (1); /* Indicate the core is powered off */ - bitmap_clear(core_power, core, 1); + bitmap_clear(cluster_cfg->core_power, core, 1); } else if (cpu_has_mipsmt) { /* * Have a CPU with access to the offlined CPUs registers wait diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 0b53d35a116e..4868e79f3b30 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -56,8 +56,10 @@ EXPORT_SYMBOL(cpu_sibling_map); cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); +#ifndef CONFIG_HOTPLUG_PARALLEL static DECLARE_COMPLETION(cpu_starting); static DECLARE_COMPLETION(cpu_running); +#endif /* * A logical cpu mask containing only one VPE per core to @@ -74,6 +76,8 @@ static cpumask_t cpu_core_setup_map; cpumask_t cpu_coherent_mask; +struct cpumask __cpu_primary_thread_mask __read_mostly; + unsigned int smp_max_threads __initdata = UINT_MAX; static int __init early_nosmt(char *s) @@ -367,6 +371,9 @@ asmlinkage void start_secondary(void) * to an option instead of something based on .cputype */ +#ifdef CONFIG_HOTPLUG_PARALLEL + cpuhp_ap_sync_alive(); +#endif calibrate_delay(); cpu_data[cpu].udelay_val = loops_per_jiffy; @@ -376,8 +383,10 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); +#ifndef CONFIG_HOTPLUG_PARALLEL /* Notify boot CPU that we're starting & ready to sync counters */ complete(&cpu_starting); +#endif synchronise_count_slave(cpu); @@ -386,11 +395,13 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); +#ifndef CONFIG_HOTPLUG_PARALLEL /* * Notify boot CPU that we're up & online and it can safely return * from __cpu_up */ complete(&cpu_running); +#endif /* * irq will be enabled in ->smp_finish(), enabling it too early @@ -439,7 +450,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } /* preload SMP state for boot cpu */ -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { if (mp_ops->prepare_boot_cpu) mp_ops->prepare_boot_cpu(); @@ -447,6 +458,12 @@ void smp_prepare_boot_cpu(void) set_cpu_online(0, true); } +#ifdef CONFIG_HOTPLUG_PARALLEL +int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle) +{ + return mp_ops->boot_secondary(cpu, tidle); +} +#else int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int err; @@ -462,12 +479,11 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) return -EIO; } - synchronise_count_master(cpu); - /* Wait for CPU to finish startup & mark itself online before return */ wait_for_completion(&cpu_running); return 0; } +#endif #ifdef CONFIG_PROFILING /* Not really SMP stuff ... */ diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 71c7e5e27567..dd31e3fffd24 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -26,10 +26,6 @@ #define ERRCTL_SPRAM (1 << 28) -/* errctl access */ -#define read_c0_errctl(x) read_c0_ecc(x) -#define write_c0_errctl(x) write_c0_ecc(x) - /* * Different semantics to the set_c0_* function built by __BUILD_SET_C0 */ diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index abdd7aaa3311..39156592582e 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -2,121 +2,244 @@ /* * Count register synchronisation. * - * All CPUs will have their count registers synchronised to the CPU0 next time - * value. This can cause a small timewarp for CPU0. 
All other CPU's should - * not have done anything significant (but they may have had interrupts - * enabled briefly - prom_smp_finish() should not be responsible for enabling - * interrupts...) + * Derived from arch/x86/kernel/tsc_sync.c + * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar */ #include <linux/kernel.h> #include <linux/irqflags.h> #include <linux/cpumask.h> +#include <linux/atomic.h> +#include <linux/nmi.h> +#include <linux/smp.h> +#include <linux/spinlock.h> #include <asm/r4k-timer.h> -#include <linux/atomic.h> -#include <asm/barrier.h> #include <asm/mipsregs.h> +#include <asm/time.h> -static unsigned int initcount = 0; -static atomic_t count_count_start = ATOMIC_INIT(0); -static atomic_t count_count_stop = ATOMIC_INIT(0); - -#define COUNTON 100 -#define NR_LOOPS 3 - -void synchronise_count_master(int cpu) -{ - int i; - unsigned long flags; - - pr_info("Synchronize counters for CPU %u: ", cpu); +#define COUNTON 100 +#define NR_LOOPS 3 +#define LOOP_TIMEOUT 20 - local_irq_save(flags); +/* + * Entry/exit counters that make sure that both CPUs + * run the measurement code at once: + */ +static atomic_t start_count; +static atomic_t stop_count; +static atomic_t test_runs; - /* - * We loop a few times to get a primed instruction cache, - * then the last pass is more or less synchronised and - * the master and slaves each set their cycle counters to a known - * value all at once. This reduces the chance of having random offsets - * between the processors, and guarantees that the maximum - * delay between the cycle counters is never bigger than - * the latency of information-passing (cachelines) between - * two CPUs. - */ +/* + * We use a raw spinlock in this exceptional case, because + * we want to have the fastest, inlined, non-debug version + * of a critical section, to be able to prove counter time-warps: + */ +static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; - for (i = 0; i < NR_LOOPS; i++) { - /* slaves loop on '!= 2' */ - while (atomic_read(&count_count_start) != 1) - mb(); - atomic_set(&count_count_stop, 0); - smp_wmb(); +static uint32_t last_counter; +static uint32_t max_warp; +static int nr_warps; +static int random_warps; - /* Let the slave writes its count register */ - atomic_inc(&count_count_start); +/* + * Counter warp measurement loop running on both CPUs. + */ +static uint32_t check_counter_warp(void) +{ + uint32_t start, now, prev, end, cur_max_warp = 0; + int i, cur_warps = 0; - /* Count will be initialised to current timer */ - if (i == 1) - initcount = read_c0_count(); + start = read_c0_count(); + end = start + (uint32_t) mips_hpt_frequency / 1000 * LOOP_TIMEOUT; + for (i = 0; ; i++) { /* - * Everyone initialises count in the last loop: + * We take the global lock, measure counter, save the + * previous counter that was measured (possibly on + * another CPU) and update the previous counter timestamp. 
*/ - if (i == NR_LOOPS-1) - write_c0_count(initcount); + arch_spin_lock(&sync_lock); + prev = last_counter; + now = read_c0_count(); + last_counter = now; + arch_spin_unlock(&sync_lock); /* - * Wait for slave to leave the synchronization point: + * Be nice every now and then (and also check whether + * measurement is done [we also insert a 10 million + * loops safety exit, so we dont lock up in case the + * counter is totally broken]): */ - while (atomic_read(&count_count_stop) != 1) - mb(); - atomic_set(&count_count_start, 0); - smp_wmb(); - atomic_inc(&count_count_stop); + if (unlikely(!(i & 7))) { + if (now > end || i > 10000000) + break; + cpu_relax(); + touch_nmi_watchdog(); + } + /* + * Outside the critical section we can now see whether + * we saw a time-warp of the counter going backwards: + */ + if (unlikely(prev > now)) { + arch_spin_lock(&sync_lock); + max_warp = max(max_warp, prev - now); + cur_max_warp = max_warp; + /* + * Check whether this bounces back and forth. Only + * one CPU should observe time going backwards. + */ + if (cur_warps != nr_warps) + random_warps++; + nr_warps++; + cur_warps = nr_warps; + arch_spin_unlock(&sync_lock); + } + } + WARN(!(now-start), + "Warning: zero counter calibration delta: %d [max: %d]\n", + now-start, end-start); + return cur_max_warp; +} + +/* + * The freshly booted CPU initiates this via an async SMP function call. + */ +static void check_counter_sync_source(void *__cpu) +{ + unsigned int cpu = (unsigned long)__cpu; + int cpus = 2; + + atomic_set(&test_runs, NR_LOOPS); +retry: + /* Wait for the target to start. */ + while (atomic_read(&start_count) != cpus - 1) + cpu_relax(); + + /* + * Trigger the target to continue into the measurement too: + */ + atomic_inc(&start_count); + + check_counter_warp(); + + while (atomic_read(&stop_count) != cpus-1) + cpu_relax(); + + /* + * If the test was successful set the number of runs to zero and + * stop. If not, decrement the number of runs an check if we can + * retry. In case of random warps no retry is attempted. + */ + if (!nr_warps) { + atomic_set(&test_runs, 0); + + pr_info("Counter synchronization [CPU#%d -> CPU#%u]: passed\n", + smp_processor_id(), cpu); + } else if (atomic_dec_and_test(&test_runs) || random_warps) { + /* Force it to 0 if random warps brought us here */ + atomic_set(&test_runs, 0); + + pr_info("Counter synchronization [CPU#%d -> CPU#%u]:\n", + smp_processor_id(), cpu); + pr_info("Measured %d cycles counter warp between CPUs", max_warp); + if (random_warps) + pr_warn("Counter warped randomly between CPUs\n"); } - /* Arrange for an interrupt in a short while */ - write_c0_compare(read_c0_count() + COUNTON); - local_irq_restore(flags); + /* + * Reset it - just in case we boot another CPU later: + */ + atomic_set(&start_count, 0); + random_warps = 0; + nr_warps = 0; + max_warp = 0; + last_counter = 0; + + /* + * Let the target continue with the bootup: + */ + atomic_inc(&stop_count); /* - * i386 code reported the skew here, but the - * count registers were almost certainly out of sync - * so no point in alarming people + * Retry, if there is a chance to do so. 
*/ - pr_cont("done.\n"); + if (atomic_read(&test_runs) > 0) + goto retry; } +/* + * Freshly booted CPUs call into this: + */ void synchronise_count_slave(int cpu) { - int i; - unsigned long flags; + uint32_t cur_max_warp, gbl_max_warp, count; + int cpus = 2; - local_irq_save(flags); + if (!cpu_has_counter || !mips_hpt_frequency) + return; + /* Kick the control CPU into the counter synchronization function */ + smp_call_function_single(cpumask_first(cpu_online_mask), + check_counter_sync_source, + (unsigned long *)(unsigned long)cpu, 0); +retry: /* - * Not every cpu is online at the time this gets called, - * so we first wait for the master to say everyone is ready + * Register this CPU's participation and wait for the + * source CPU to start the measurement: */ + atomic_inc(&start_count); + while (atomic_read(&start_count) != cpus) + cpu_relax(); - for (i = 0; i < NR_LOOPS; i++) { - atomic_inc(&count_count_start); - while (atomic_read(&count_count_start) != 2) - mb(); + cur_max_warp = check_counter_warp(); - /* - * Everyone initialises count in the last loop: - */ - if (i == NR_LOOPS-1) - write_c0_count(initcount); + /* + * Store the maximum observed warp value for a potential retry: + */ + gbl_max_warp = max_warp; + + /* + * Ok, we are done: + */ + atomic_inc(&stop_count); + + /* + * Wait for the source CPU to print stuff: + */ + while (atomic_read(&stop_count) != cpus) + cpu_relax(); - atomic_inc(&count_count_stop); - while (atomic_read(&count_count_stop) != 2) - mb(); + /* + * Reset it for the next sync test: + */ + atomic_set(&stop_count, 0); + + /* + * Check the number of remaining test runs. If not zero, the test + * failed and a retry with adjusted counter is possible. If zero the + * test was either successful or failed terminally. + */ + if (!atomic_read(&test_runs)) { + /* Arrange for an interrupt in a short while */ + write_c0_compare(read_c0_count() + COUNTON); + return; } - /* Arrange for an interrupt in a short while */ - write_c0_compare(read_c0_count() + COUNTON); - local_irq_restore(flags); + /* + * If the warp value of this CPU is 0, then the other CPU + * observed time going backwards so this counter was ahead and + * needs to move backwards. 
+ */ + if (!cur_max_warp) + cur_max_warp = -gbl_max_warp; + + count = read_c0_count(); + count += cur_max_warp; + write_c0_count(count); + + pr_debug("Counter compensate: CPU%u observed %d warp\n", cpu, cur_max_warp); + + goto retry; + } -#undef NR_LOOPS diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index e6b21de65cca..56f6f093bb88 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm $(shell mkdir -p $(uapi) $(kapi)) syshdr := $(srctree)/scripts/syscallhdr.sh -sysnr := $(srctree)/$(src)/syscallnr.sh +sysnr := $(src)/syscallnr.sh systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 83cfc9eb6b88..aa70e371bb54 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -354,7 +354,7 @@ 412 n32 utimensat_time64 sys_utimensat 413 n32 pselect6_time64 compat_sys_pselect6_time64 414 n32 ppoll_time64 compat_sys_ppoll_time64 -416 n32 io_pgetevents_time64 sys_io_pgetevents +416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64 417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64 418 n32 mq_timedsend_time64 sys_mq_timedsend 419 n32 mq_timedreceive_time64 sys_mq_timedreceive @@ -400,3 +400,9 @@ 459 n32 lsm_get_self_attr sys_lsm_get_self_attr 460 n32 lsm_set_self_attr sys_lsm_set_self_attr 461 n32 lsm_list_modules sys_lsm_list_modules +462 n32 mseal sys_mseal +463 n32 setxattrat sys_setxattrat +464 n32 getxattrat sys_getxattrat +465 n32 listxattrat sys_listxattrat +466 n32 removexattrat sys_removexattrat +467 n32 open_tree_attr sys_open_tree_attr diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 532b855df589..1e8c44c7b614 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -376,3 +376,9 @@ 459 n64 lsm_get_self_attr sys_lsm_get_self_attr 460 n64 lsm_set_self_attr sys_lsm_set_self_attr 461 n64 lsm_list_modules sys_lsm_list_modules +462 n64 mseal sys_mseal +463 n64 setxattrat sys_setxattrat +464 n64 getxattrat sys_getxattrat +465 n64 listxattrat sys_listxattrat +466 n64 removexattrat sys_removexattrat +467 n64 open_tree_attr sys_open_tree_attr diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index f45c9530ea93..114a5a1a6230 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -27,7 +27,7 @@ 17 o32 break sys_ni_syscall # 18 was sys_stat 18 o32 unused18 sys_ni_syscall -19 o32 lseek sys_lseek +19 o32 lseek sys_lseek compat_sys_lseek 20 o32 getpid sys_getpid 21 o32 mount sys_mount 22 o32 umount sys_oldumount @@ -403,7 +403,7 @@ 412 o32 utimensat_time64 sys_utimensat sys_utimensat 413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -449,3 +449,9 @@ 459 o32 lsm_get_self_attr sys_lsm_get_self_attr 460 o32 lsm_set_self_attr sys_lsm_set_self_attr 461 o32 lsm_list_modules sys_lsm_list_modules 
+462 o32 mseal sys_mseal +463 o32 setxattrat sys_setxattrat +464 o32 getxattrat sys_getxattrat +465 o32 listxattrat sys_listxattrat +466 o32 removexattrat sys_removexattrat +467 o32 open_tree_attr sys_open_tree_attr diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index dc29bd9656b0..8ec1e185b35c 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -38,6 +38,7 @@ #include <linux/kdb.h> #include <linux/irq.h> #include <linux/perf_event.h> +#include <linux/string_choices.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> @@ -76,7 +77,7 @@ #include "access-helper.h" extern void check_wait(void); -extern asmlinkage void rollback_handle_int(void); +extern asmlinkage void skipover_handle_int(void); extern asmlinkage void handle_int(void); extern asmlinkage void handle_adel(void); extern asmlinkage void handle_ades(void); @@ -1705,10 +1706,10 @@ static inline __init void parity_protection_init(void) l2parity &= l1parity; /* Probe L1 ECC support */ - cp0_ectl = read_c0_ecc(); - write_c0_ecc(cp0_ectl | ERRCTL_PE); + cp0_ectl = read_c0_errctl(); + write_c0_errctl(cp0_ectl | ERRCTL_PE); back_to_back_c0_hazard(); - cp0_ectl = read_c0_ecc(); + cp0_ectl = read_c0_errctl(); /* Probe L2 ECC support */ gcr_ectl = read_gcr_err_control(); @@ -1727,9 +1728,9 @@ static inline __init void parity_protection_init(void) cp0_ectl |= ERRCTL_PE; else cp0_ectl &= ~ERRCTL_PE; - write_c0_ecc(cp0_ectl); + write_c0_errctl(cp0_ectl); back_to_back_c0_hazard(); - WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity); + WARN_ON(!!(read_c0_errctl() & ERRCTL_PE) != l1parity); /* Configure L2 ECC checking */ if (l2parity) @@ -1741,8 +1742,8 @@ static inline __init void parity_protection_init(void) gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN; WARN_ON(!!gcr_ectl != l2parity); - pr_info("Cache parity protection %sabled\n", - l1parity ? "en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(l1parity)); return; } @@ -1761,18 +1762,18 @@ static inline __init void parity_protection_init(void) unsigned long errctl; unsigned int l1parity_present, l2parity_present; - errctl = read_c0_ecc(); + errctl = read_c0_errctl(); errctl &= ~(ERRCTL_PE|ERRCTL_L2P); /* probe L1 parity support */ - write_c0_ecc(errctl | ERRCTL_PE); + write_c0_errctl(errctl | ERRCTL_PE); back_to_back_c0_hazard(); - l1parity_present = (read_c0_ecc() & ERRCTL_PE); + l1parity_present = (read_c0_errctl() & ERRCTL_PE); /* probe L2 parity support */ - write_c0_ecc(errctl|ERRCTL_L2P); + write_c0_errctl(errctl|ERRCTL_L2P); back_to_back_c0_hazard(); - l2parity_present = (read_c0_ecc() & ERRCTL_L2P); + l2parity_present = (read_c0_errctl() & ERRCTL_L2P); if (l1parity_present && l2parity_present) { if (l1parity) @@ -1791,20 +1792,20 @@ static inline __init void parity_protection_init(void) printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); - write_c0_ecc(errctl); + write_c0_errctl(errctl); back_to_back_c0_hazard(); - errctl = read_c0_ecc(); + errctl = read_c0_errctl(); printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); if (l1parity_present) - printk(KERN_INFO "Cache parity protection %sabled\n", - (errctl & ERRCTL_PE) ? "en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(errctl & ERRCTL_PE)); if (l2parity_present) { if (l1parity_present && l1parity) errctl ^= ERRCTL_L2P; - printk(KERN_INFO "L2 cache parity protection %sabled\n", - (errctl & ERRCTL_L2P) ? 
"en" : "dis"); + pr_info("L2 cache parity protection %s\n", + str_enabled_disabled(errctl & ERRCTL_L2P)); } } break; @@ -1812,11 +1813,11 @@ static inline __init void parity_protection_init(void) case CPU_5KC: case CPU_5KE: case CPU_LOONGSON32: - write_c0_ecc(0x80000000); + write_c0_errctl(0x80000000); back_to_back_c0_hazard(); /* Set the PE bit (bit 31) in the c0_errctl register. */ - printk(KERN_INFO "Cache parity protection %sabled\n", - (read_c0_ecc() & 0x80000000) ? "en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(read_c0_errctl() & 0x80000000)); break; case CPU_20KC: case CPU_25KF: @@ -1887,8 +1888,8 @@ asmlinkage void do_ftlb(void) if ((cpu_has_mips_r2_r6) && (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) || ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) { - pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", - read_c0_ecc()); + pr_err("FTLB error exception, cp0_errctl=0x%08x:\n", + read_c0_errctl()); pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); reg_val = read_c0_cacheerr(); pr_err("c0_cacheerr == %08x\n", reg_val); @@ -2065,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr) { extern const u8 except_vec_vi[]; extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; - extern const u8 rollback_except_vec_vi[]; + extern const u8 skipover_except_vec_vi[]; unsigned long handler; unsigned long old_handler = vi_handlers[n]; int srssets = current_cpu_data.srsets; @@ -2094,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr) change_c0_srsmap(0xf << n*4, 0 << n*4); } - vec_start = using_rollback_handler() ? rollback_except_vec_vi : + vec_start = using_skipover_handler() ? skipover_except_vec_vi : except_vec_vi; #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) ori_offset = except_vec_vi_ori - vec_start + 2; @@ -2425,8 +2426,8 @@ void __init trap_init(void) if (board_be_init) board_be_init(); - set_except_vector(EXCCODE_INT, using_rollback_handler() ? - rollback_handle_int : handle_int); + set_except_vector(EXCCODE_INT, using_skipover_handler() ? + skipover_handle_int : handle_int); set_except_vector(EXCCODE_MOD, handle_tlbm); set_except_vector(EXCCODE_TLBL, handle_tlbl); set_except_vector(EXCCODE_TLBS, handle_tlbs); diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index dda36fa26307..de096777172f 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -11,10 +11,11 @@ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/mman.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/timekeeper_internal.h> +#include <linux/vdso_datastore.h> #include <asm/abi.h> #include <asm/mips-cps.h> @@ -23,20 +24,7 @@ #include <vdso/helpers.h> #include <vdso/vsyscall.h> -/* Kernel-provided data used by the VDSO. */ -static union vdso_data_store mips_vdso_data __page_aligned_data; -struct vdso_data *vdso_data = mips_vdso_data.data; - -/* - * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as - * what we map and where within the area they are mapped is determined at - * runtime. 
- */ -static struct page *no_pages[] = { NULL }; -static struct vm_special_mapping vdso_vvar_mapping = { - .name = "[vvar]", - .pages = no_pages, -}; +static_assert(VDSO_NR_PAGES == __VDSO_PAGES); static void __init init_vdso_image(struct mips_vdso_image *image) { @@ -90,7 +78,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mips_vdso_image *image = current->thread.abi->vdso; struct mm_struct *mm = current->mm; - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; + unsigned long gic_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; struct vm_area_struct *vma; int ret; @@ -98,11 +86,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -EINTR; if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { + unsigned long unused; + /* Map delay slot emulation page */ - base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, - VM_READ | VM_EXEC | - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - 0, NULL); + base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0, &unused, + NULL); if (IS_ERR_VALUE(base)) { ret = base; goto out; @@ -118,8 +107,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * the counter registers at the start. */ gic_size = mips_gic_present() ? PAGE_SIZE : 0; - vvar_size = gic_size + PAGE_SIZE; - size = vvar_size + image->size; + size = gic_size + VDSO_NR_PAGES * PAGE_SIZE + image->size; /* * Find a region that's large enough for us to perform the @@ -142,15 +130,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) */ if (cpu_has_dc_aliases) { base = __ALIGN_MASK(base, shm_align_mask); - base += ((unsigned long)vdso_data - gic_size) & shm_align_mask; + base += ((unsigned long)vdso_k_time_data - gic_size) & shm_align_mask; } data_addr = base + gic_size; - vdso_addr = data_addr + PAGE_SIZE; + vdso_addr = data_addr + VDSO_NR_PAGES * PAGE_SIZE; - vma = _install_special_mapping(mm, base, vvar_size, - VM_READ | VM_MAYREAD, - &vdso_vvar_mapping); + vma = vdso_install_vvar_mapping(mm, data_addr); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; @@ -160,6 +146,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (gic_size) { gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS; gic_pfn = PFN_DOWN(__pa(gic_base)); + static const struct vm_special_mapping gic_mapping = { + .name = "[gic]", + .pages = (struct page **) { NULL }, + }; + + vma = _install_special_mapping(mm, base, gic_size, VM_READ | VM_MAYREAD, + &gic_mapping); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto out; + } ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, pgprot_noncached(vma->vm_page_prot)); @@ -167,13 +164,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto out; } - /* Map data page. */ - ret = remap_pfn_range(vma, data_addr, - virt_to_phys(vdso_data) >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot); - if (ret) - goto out; - /* Map VDSO image. 
*/
 vma = _install_special_mapping(mm, vdso_addr, image->size,
 VM_READ | VM_EXEC |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9ff55cb80a64..2b708fac8d2c 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -61,6 +61,7 @@ SECTIONS
 /* read-only */
 _text = .; /* Text and read-only data */
 .text : {
+ HEAD_TEXT
 TEXT_TEXT
 SCHED_TEXT
 LOCK_TEXT
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 737d0d4fdcd3..2b67c44adab9 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/elf.h>
 #include <linux/seq_file.h>
+#include <linux/string.h>
 #include <linux/syscalls.h>
 #include <linux/moduleloader.h>
 #include <linux/interrupt.h>
@@ -582,7 +583,7 @@ static int vpe_elfload(struct vpe *v)
 struct module mod; /* so we can re-use the relocations code */

 memset(&mod, 0, sizeof(struct module));
- strcpy(mod.name, "VPE loader");
+ strscpy(mod.name, "VPE loader");
 hdr = (Elf_Ehdr *) v->pbuffer;
 len = v->plen;
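
The rewritten sync-r4k.c above replaces the old lockstep write of c0_count with a warp-detection handshake derived from x86's tsc_sync.c: both CPUs rendezvous on start_count, repeatedly sample the counter inside a shared spinlock while remembering the last value either side observed, record any backwards step as a warp, then rendezvous on stop_count before the source CPU reports the result and decides whether to retry or compensate. The sketch below is a minimal user-space illustration of that handshake only; it is not kernel code, and it assumes pthreads in place of CPUs and CLOCK_MONOTONIC in place of the CP0 count register, with names chosen merely to mirror the diff.

/*
 * Illustrative user-space sketch (assumptions: pthreads stand in for CPUs,
 * CLOCK_MONOTONIC stands in for the CP0 count register).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static atomic_int start_count;
static atomic_int stop_count;
static pthread_spinlock_t sync_lock;
static uint64_t last_counter;
static uint64_t max_warp;

static uint64_t read_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void check_counter_warp(void)
{
	uint64_t prev, now;
	int i;

	for (i = 0; i < 200000; i++) {
		/*
		 * Take the shared lock, read the counter, and remember the
		 * last value observed by either participant; a "warp" means
		 * the other side already saw a later value than we see now.
		 */
		pthread_spin_lock(&sync_lock);
		prev = last_counter;
		now = read_counter();
		last_counter = now;
		if (prev > now && prev - now > max_warp)
			max_warp = prev - now;
		pthread_spin_unlock(&sync_lock);
	}
}

static void *participant(void *arg)
{
	/* Rendezvous with the other participant before measuring. */
	atomic_fetch_add(&start_count, 1);
	while (atomic_load(&start_count) != 2)
		;

	check_counter_warp();

	/* Rendezvous again so results are only read once both are done. */
	atomic_fetch_add(&stop_count, 1);
	while (atomic_load(&stop_count) != 2)
		;

	return NULL;
}

int main(void)
{
	pthread_t thread;

	pthread_spin_init(&sync_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&thread, NULL, participant, NULL);
	participant(NULL);
	pthread_join(thread, NULL);

	printf("max observed counter warp: %llu ns\n",
	       (unsigned long long)max_warp);
	return 0;
}

Built with "cc -pthread", this will normally report a warp of 0, since CLOCK_MONOTONIC cannot go backwards; the point is only the ordering of the two rendezvous and the shared-lock sampling that the kernel code relies on to prove a counter time-warp.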