Diffstat (limited to 'arch/mips/kernel')
30 files changed, 601 insertions, 285 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index ecf3278a32f7..95a1e674fd67 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the Linux/MIPS kernel.
 #
 
-extra-y		:= vmlinux.lds
+always-$(KBUILD_BUILTIN)	:= vmlinux.lds
 
 obj-y		+= head.o branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \
 		   process.o prom.o ptrace.o reset.o setup.o signal.o \
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index cb1045ebab06..5debd9a3854a 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -9,6 +9,8 @@
  * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.
  */
+#define COMPILE_OFFSETS
+
 #include <linux/compat.h>
 #include <linux/types.h>
 #include <linux/sched.h>
@@ -27,6 +29,12 @@ void output_ptreg_defines(void);
 void output_ptreg_defines(void)
 {
 	COMMENT("MIPS pt_regs offsets.");
+#ifdef CONFIG_32BIT
+	OFFSET(PT_ARG4, pt_regs, args[4]);
+	OFFSET(PT_ARG5, pt_regs, args[5]);
+	OFFSET(PT_ARG6, pt_regs, args[6]);
+	OFFSET(PT_ARG7, pt_regs, args[7]);
+#endif
 	OFFSET(PT_R0, pt_regs, regs[0]);
 	OFFSET(PT_R1, pt_regs, regs[1]);
 	OFFSET(PT_R2, pt_regs, regs[2]);
@@ -404,6 +412,9 @@ void output_cps_defines(void)
 {
 	COMMENT(" MIPS CPS offsets. ");
 
+	OFFSET(CLUSTERBOOTCFG_CORECONFIG, cluster_boot_config, core_config);
+	DEFINE(CLUSTERBOOTCFG_SIZE, sizeof(struct cluster_boot_config));
+
 	OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
 	OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
 	DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index d39a2963b451..2a14dc4ee57e 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -103,7 +103,7 @@ void sb1480_clockevent_init(void)
 
 	BUG_ON(cpu > 3);	/* Only have 4 general purpose timers */
 
-	sprintf(name, "bcm1480-counter-%d", cpu);
+	sprintf(name, "bcm1480-counter-%u", cpu);
 	cd->name		= name;
 	cd->features		= CLOCK_EVT_FEAT_PERIODIC |
 				  CLOCK_EVT_FEAT_ONESHOT;
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index 9a47fbcd4638..de64d6bb7ba3 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -10,6 +10,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/irq.h>
 
+#include <asm/ds1287.h>
 #include <asm/time.h>
 
 int ds1287_timer_state(void)
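The PT_ARG* and CLUSTERBOOTCFG_* constants added in asm-offsets.c above are produced by the usual asm-offsets trick and consumed by the scall32-o32.S and cps-vec.S hunks further down. A minimal standalone sketch of that mechanism (simplified macros and a hypothetical struct; the kernel's real versions live in include/linux/kbuild.h):

	#include <stddef.h>

	struct pt_regs_sketch {
		unsigned long args[8];
		unsigned long regs[32];
	};

	/* Embed the constant in the generated asm, where a build script greps it out */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
	#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

	void output_sketch_defines(void)
	{
		OFFSET(PT_ARG4, pt_regs_sketch, args[4]);	/* becomes a #define in asm-offsets.h */
	}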
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index f876309130ad..2ae7034a3d5c 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -19,6 +19,10 @@
 #define GCR_CPC_BASE_OFS	0x0088
 #define GCR_CL_COHERENCE_OFS	0x2008
 #define GCR_CL_ID_OFS		0x2028
+#define CM3_GCR_Cx_ID_CLUSTER_SHF	8
+#define CM3_GCR_Cx_ID_CLUSTER_MSK	(0xff << 8)
+#define CM3_GCR_Cx_ID_CORENUM_SHF	0
+#define CM3_GCR_Cx_ID_CORENUM_MSK	(0xff << 0)
 #define CPC_CL_VC_STOP_OFS	0x2020
 #define CPC_CL_VC_RUN_OFS	0x2028
 
@@ -271,12 +275,21 @@ LEAF(mips_cps_core_init)
  */
 LEAF(mips_cps_get_bootcfg)
 	/* Calculate a pointer to this cores struct core_boot_config */
+	PTR_LA	v0, mips_cps_cluster_bootcfg
+	PTR_L	v0, 0(v0)
 	lw	t0, GCR_CL_ID_OFS(s1)
+#ifdef CONFIG_CPU_MIPSR6
+	ext	t1, t0, CM3_GCR_Cx_ID_CLUSTER_SHF, 8
+	li	t2, CLUSTERBOOTCFG_SIZE
+	mul	t1, t1, t2
+	PTR_ADDU \
+		v0, v0, t1
+#endif
+	PTR_L	v0, CLUSTERBOOTCFG_CORECONFIG(v0)
+	andi	t0, t0, CM3_GCR_Cx_ID_CORENUM_MSK
 	li	t1, COREBOOTCFG_SIZE
 	mul	t0, t0, t1
-	PTR_LA	t1, mips_cps_core_bootcfg
-	PTR_L	t1, 0(t1)
-	PTR_ADDU v0, t0, t1
+	PTR_ADDU v0, v0, t0
 
 	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
 	li	t9, 0
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index af7412549e6e..1e49e05ac8b1 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/mmu_context.h>
 #include <linux/ptrace.h>
 #include <linux/smp.h>
 #include <linux/stddef.h>
@@ -37,6 +38,8 @@ unsigned int elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
 
+static bool mmid_disabled_quirk;
+
 static inline unsigned long cpu_get_msa_id(void)
 {
 	unsigned long status, msa_id;
@@ -645,7 +648,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 	config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
 
 	if (cpu_has_mips_r6) {
-		if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
+		if (!mmid_disabled_quirk && (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid))
 			config5 |= MIPS_CONF5_MI;
 		else
 			config5 &= ~MIPS_CONF5_MI;
@@ -708,7 +711,6 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 				  max_mmid_width);
 			asid_mask = GENMASK(max_mmid_width - 1, 0);
 		}
-
 		set_cpu_asid_mask(c, asid_mask);
 	}
 }
@@ -1286,14 +1288,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		set_cpu_asid_mask(c, MIPS_ENTRYHI_ASID);
 		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
 		break;
-	case PRID_IMP_LOONGSON_32:  /* Loongson-1 */
+	case PRID_IMP_LOONGSON_32:
 		decode_configs(c);
 
 		c->cputype = CPU_LOONGSON32;
 
 		switch (c->processor_id & PRID_REV_MASK) {
-		case PRID_REV_LOONGSON1B:
-			__cpu_name[cpu] = "Loongson 1B";
+		case PRID_REV_LOONGSON1:
+			__cpu_name[cpu] = "ICT Loongson-1";
 			break;
 		}
 
@@ -2046,3 +2048,39 @@ void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe)
 	cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP;
 	cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF;
 }
+
+void cpu_disable_mmid(void)
+{
+	int i;
+	unsigned long asid_mask;
+	unsigned int cpu = smp_processor_id();
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned int config4 = read_c0_config4();
+	unsigned int config5 = read_c0_config5();
+
+	/* Setup the initial ASID mask based on config4 */
+	asid_mask = MIPS_ENTRYHI_ASID;
+	if (config4 & MIPS_CONF4_AE)
+		asid_mask |= MIPS_ENTRYHI_ASIDX;
+	set_cpu_asid_mask(c, asid_mask);
+
+	/* Disable MMID in the C0 and update cpuinfo_mips accordingly */
+	config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
+	config5 &= ~MIPS_CONF5_MI;
+	write_c0_config5(config5);
+	/* Ensure the write to config5 above takes effect */
+	back_to_back_c0_hazard();
+	c->options &= ~MIPS_CPU_MMID;
+
+	/* Setup asid cache value cleared in per_cpu_trap_init() */
+	cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+	/* Reinit context for each CPU */
+	for_each_possible_cpu(i)
+		set_cpu_context(i, &init_mm, 0);
+
+	/* Ensure that now MMID will be seen as disable */
+	mmid_disabled_quirk = true;
+
+	pr_info("MMID support disabled due to hardware support issue\n");
+}
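For readability, the pointer arithmetic that mips_cps_get_bootcfg now performs in assembly (cps-vec.S above) corresponds roughly to this C; the helper itself is not part of the patch, but field and macro names are taken from it:

	struct core_boot_config *get_bootcfg_sketch(u32 gcr_cl_id)
	{
		/* On CM3, GCR_CL_ID packs the cluster in bits 15:8, the core in bits 7:0 */
		u32 cluster = (gcr_cl_id >> CM3_GCR_Cx_ID_CLUSTER_SHF) & 0xff;
		u32 core = gcr_cl_id & CM3_GCR_Cx_ID_CORENUM_MSK;

		return &mips_cps_cluster_bootcfg[cluster].core_config[core];
	}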
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 8c401e42301c..b15615b28569 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -54,10 +54,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
 	u32 *buf;
 	unsigned int v1;
 
-	/* la v1, _mcount */
-	v1 = 3;
-	buf = (u32 *)&insn_la_mcount[0];
-	UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+	/* If we are not in compat space, the number of generated
+	 * instructions will exceed the maximum expected limit of 2.
+	 * To prevent buffer overflow, we avoid generating them.
+	 * insn_la_mcount will not be used later in ftrace_make_call.
+	 */
+	if (uasm_in_compat_space_p(MCOUNT_ADDR)) {
+		/* la v1, _mcount */
+		v1 = 3;
+		buf = (u32 *)&insn_la_mcount[0];
+		UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+	} else {
+		pr_warn("ftrace: mcount address beyond 32 bits is not supported (%lX)\n",
+			MCOUNT_ADDR);
+	}
 
 	/* jal (ftrace_caller + 8), jump over the first two instruction */
 	buf = (u32 *)&insn_jal_ftrace_caller;
@@ -189,6 +199,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
+	/* When the code to patch does not belong to the kernel code
+	 * space, we must use insn_la_mcount. However, if MCOUNT_ADDR
+	 * is not in compat space, insn_la_mcount is not usable.
+	 */
+	if (!core_kernel_text(ip) && !uasm_in_compat_space_p(MCOUNT_ADDR))
+		return -EFAULT;
+
 	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 
 #ifdef CONFIG_64BIT
@@ -248,7 +265,7 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
 		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
 	unsigned long sp, ip, tmp;
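The two-instruction limit exists because insn_la_mcount[] only has room for the 32-bit expansion of la. For illustration, the typical expansions (assembler output may vary; this is not part of the patch):

	# 32-bit (compat) address: fits the two-slot buffer
	lui	v1, %hi(_mcount)
	addiu	v1, v1, %lo(_mcount)

	# full 64-bit address: roughly six instructions, e.g.
	lui	v1, %highest(_mcount)
	daddiu	v1, v1, %higher(_mcount)
	dsll	v1, v1, 16
	daddiu	v1, v1, %hi(_mcount)
	dsll	v1, v1, 16
	daddiu	v1, v1, %lo(_mcount)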
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a572ce36a24f..be1b049d856f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -104,48 +104,59 @@ handle_vcei:
 
 	__FINIT
 
-	.align	5	/* 32 byte rollback region */
-LEAF(__r4k_wait)
-	.set	push
-	.set	noreorder
-	/* start of rollback region */
-	LONG_L	t0, TI_FLAGS($28)
-	nop
-	andi	t0, _TIF_NEED_RESCHED
-	bnez	t0, 1f
-	 nop
-	nop
-	nop
-#ifdef CONFIG_CPU_MICROMIPS
-	nop
-	nop
-	nop
-	nop
-#endif
+	.section .cpuidle.text,"ax"
+	/* Align to 32 bytes for the maximum idle interrupt region size. */
+	.align	5
+LEAF(r4k_wait)
+	/* Keep the ISA bit clear for calculations on local labels here. */
+0:	.fill	0
+	/* Start of idle interrupt region. */
+	local_irq_enable
+	/*
+	 * If an interrupt lands here, before going idle on the next
+	 * instruction, we must *NOT* go idle since the interrupt could
+	 * have set TIF_NEED_RESCHED or caused a timer to need resched.
+	 * Fall through -- see skipover_handler below -- and have the
+	 * idle loop take care of things.
+	 */
+1:	.fill	0
+	/* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
+	.if		1b - 0b > 32
+	.error	"overlong idle interrupt region"
+	.elseif	1b - 0b > 8
+	.align	4
+	.endif
+2:	.fill	0
+	.equ	r4k_wait_idle_size, 2b - 0b
+	/* End of idle interrupt region; size has to be a power of 2. */
 	.set	MIPS_ISA_ARCH_LEVEL_RAW
+r4k_wait_insn:
 	wait
-	/* end of rollback region (the region size must be power of two) */
-1:
+r4k_wait_exit:
+	.set	mips0
+	local_irq_disable
 	jr	ra
-	 nop
-	.set	pop
-	END(__r4k_wait)
+	END(r4k_wait)
+	.previous
 
-	.macro	BUILD_ROLLBACK_PROLOGUE handler
-	FEXPORT(rollback_\handler)
+	.macro	BUILD_SKIPOVER_PROLOGUE handler
+	FEXPORT(skipover_\handler)
 	.set	push
 	.set	noat
 	MFC0	k0, CP0_EPC
-	PTR_LA	k1, __r4k_wait
-	ori	k0, 0x1f	/* 32 byte rollback region */
-	xori	k0, 0x1f
+	/* Subtract/add 2 to let the ISA bit propagate through the mask. */
+	PTR_LA	k1, r4k_wait_insn - 2
+	ori	k0, r4k_wait_idle_size - 2
+	.set	noreorder
 	bne	k0, k1, \handler
+	 PTR_ADDIU	k0, r4k_wait_exit - r4k_wait_insn + 2
+	.set	reorder
 	MTC0	k0, CP0_EPC
 	.set	pop
 	.endm
 
 	.align	5
-BUILD_ROLLBACK_PROLOGUE handle_int
+BUILD_SKIPOVER_PROLOGUE handle_int
 NESTED(handle_int, PT_SIZE, sp)
 	.cfi_signal_frame
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
  * This prototype is copied to ebase + n*IntCtl.VS and patched
  * to invoke the handler
  */
-BUILD_ROLLBACK_PROLOGUE except_vec_vi
+BUILD_SKIPOVER_PROLOGUE except_vec_vi
 NESTED(except_vec_vi, 0, sp)
 	SAVE_SOME docfi=1
 	SAVE_AT docfi=1
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
index 8c083612df9d..96ac40d20c23 100644
--- a/arch/mips/kernel/gpio_txx9.c
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -32,14 +32,16 @@ static void txx9_gpio_set_raw(unsigned int offset, int value)
 	__raw_writel(val, &txx9_pioptr->dout);
 }
 
-static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
-			  int value)
+static int txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+			 int value)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&txx9_gpio_lock, flags);
 	txx9_gpio_set_raw(offset, value);
 	mmiowb();
 	spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+
+	return 0;
 }
 
 static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index b825ed4476c7..d99ed58b7043 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -59,6 +59,8 @@
 #endif
 	.endm
 
+	__HEAD
+
 #ifndef CONFIG_NO_EXCEPT_FILL
 	/*
 	 * Reserved space for exception handlers.
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5abc8b7340f8..80e8a04a642e 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
 	write_c0_conf(cfg | R30XX_CONF_HALT);
 }
 
-void __cpuidle r4k_wait(void)
-{
-	raw_local_irq_enable();
-	__r4k_wait();
-	raw_local_irq_disable();
-}
-
 /*
  * This variant is preferable as it allows testing need_resched and going to
  * sleep depending on the outcome atomically. Unfortunately the "It is
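A C model of what BUILD_SKIPOVER_PROLOGUE does with EPC may help (a sketch only; the real macro works on k0/k1 with the ISA bit folded into the mask arithmetic):

	unsigned long skipover_sketch(unsigned long epc)
	{
		extern char r4k_wait_region[], r4k_wait_exit[];	/* labels in r4k_wait */
		unsigned long start = (unsigned long)r4k_wait_region;

		/* Interrupt taken inside the power-of-2 sized idle region? */
		if (epc - start < r4k_wait_idle_size)
			return (unsigned long)r4k_wait_exit;	/* resume past the wait */
		return epc;	/* elsewhere: take the interrupt normally */
	}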
Unfortunately the "It is diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 3eb2cfb893e1..7c9c5dc38823 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -5,15 +5,18 @@ */ #include <linux/errno.h> +#include <linux/of.h> #include <linux/percpu.h> #include <linux/spinlock.h> #include <asm/mips-cps.h> +#include <asm/smp-cps.h> #include <asm/mipsregs.h> void __iomem *mips_gcr_base; void __iomem *mips_cm_l2sync_base; int mips_cm_is64; +bool mips_cm_is_l2_hci_broken; static char *cm2_tr[8] = { "mem", "gcr", "gic", "mmio", @@ -237,6 +240,23 @@ static void mips_cm_probe_l2sync(void) mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE); } +void mips_cm_update_property(void) +{ + struct device_node *cm_node; + + cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm"); + if (!cm_node) + return; + pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken"); + mips_cm_is_l2_hci_broken = true; + + /* Disable MMID only if it was configured */ + if (cpu_has_mmid) + cpu_disable_mmid(); + + of_node_put(cm_node); +} + int mips_cm_probe(void) { phys_addr_t addr; @@ -308,7 +328,9 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp); if (cm_rev >= CM_REV_CM3_5) { - val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + if (cluster != cpu_cluster(¤t_cpu_data)) + val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + val |= CM_GCR_Cx_OTHER_GIC_EN; val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster); val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block); } else { @@ -513,39 +535,23 @@ void mips_cm_error_report(void) write_gcr_error_cause(cm_error); } -unsigned int mips_cps_first_online_in_cluster(void) +unsigned int mips_cps_first_online_in_cluster(int *first_cpu) { - unsigned int local_cl; - int i; - - local_cl = cpu_cluster(¤t_cpu_data); + unsigned int local_cl = cpu_cluster(¤t_cpu_data); + struct cpumask *local_cl_mask; /* - * We rely upon knowledge that CPUs are numbered sequentially by - * cluster - ie. CPUs 0..X will be in cluster 0, CPUs X+1..Y in cluster - * 1, CPUs Y+1..Z in cluster 2 etc. This means that CPUs in the same - * cluster will immediately precede or follow one another. - * - * First we scan backwards, until we find an online CPU in the cluster - * or we move on to another cluster. + * mips_cps_cluster_bootcfg is allocated in cps_prepare_cpus. If it is + * not yet done, then we are so early that only one CPU is running, so + * it is the first online CPU in the cluster. 
*/ - for (i = smp_processor_id() - 1; i >= 0; i--) { - if (cpu_cluster(&cpu_data[i]) != local_cl) - break; - if (!cpu_online(i)) - continue; - return false; - } - - /* Then do the same for higher numbered CPUs */ - for (i = smp_processor_id() + 1; i < nr_cpu_ids; i++) { - if (cpu_cluster(&cpu_data[i]) != local_cl) - break; - if (!cpu_online(i)) - continue; - return false; - } - - /* We found no online CPUs in the local cluster */ - return true; + if (IS_ENABLED(CONFIG_MIPS_CPS) && mips_cps_cluster_bootcfg) + local_cl_mask = &mips_cps_cluster_bootcfg[local_cl].cpumask; + else + return true; + + *first_cpu = cpumask_any_and_but(local_cl_mask, + cpu_online_mask, + smp_processor_id()); + return (*first_cpu >= nr_cpu_ids); } diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 37676a44fefb..2ef610650a9e 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -122,9 +122,8 @@ void mips_mt_set_cpuoptions(void) unsigned long ectlval; unsigned long itcblkgrn; - /* ErrCtl register is known as "ecc" to Linux */ - ectlval = read_c0_ecc(); - write_c0_ecc(ectlval | (0x1 << 26)); + ectlval = read_c0_errctl(); + write_c0_errctl(ectlval | (0x1 << 26)); ehb(); #define INDEX_0 (0x80000000) #define INDEX_8 (0x80000008) @@ -145,7 +144,7 @@ void mips_mt_set_cpuoptions(void) ehb(); /* Write out to ITU with CACHE op */ cache_op(Index_Store_Tag_D, INDEX_0); - write_c0_ecc(ectlval); + write_c0_errctl(ectlval); ehb(); printk("Mapped %ld ITC cells starting at 0x%08x\n", ((itcblkgrn & 0x7fe00000) >> 20), itc_base); diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index c4d6b09136b1..196a070349b0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -791,8 +791,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, if (!mipspmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - mipsxx_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index d09ca77e624d..3de0e05e0511 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -57,10 +57,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); -/* - * Used to synchronize entry to deep idle states. Actually per-core rather - * than per-CPU. 
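The repeated pattern in the pm-cps.c hunks above -- keying per-CPU data off a CPU's first sibling rather than the raw core number -- is what keeps the state per-core now that core numbers repeat across clusters. In sketch form (the helper is illustrative, not part of the patch):

	static inline atomic_t *core_pm_barrier(unsigned int cpu)
	{
		/* One canonical owner per core: the first CPU in its sibling mask */
		return &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
	}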
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b630604c577f..a3101f2268c6 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -107,7 +107,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  */
 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
-	unsigned long clone_flags = args->flags;
+	u64 clone_flags = args->flags;
 	unsigned long usp = args->stack;
 	unsigned long tls = args->tls;
 	struct thread_info *ti = task_thread_info(p);
@@ -690,18 +690,20 @@ unsigned long mips_stack_top(void)
 	}
 
 	/* Space for the VDSO, data page & GIC user page */
-	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
-	top -= PAGE_SIZE;
-	top -= mips_gic_present() ? PAGE_SIZE : 0;
+	if (current->thread.abi) {
+		top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+		top -= VDSO_NR_PAGES * PAGE_SIZE;
+		top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+		/* Space to randomize the VDSO base */
+		if (current->flags & PF_RANDOMIZE)
+			top -= VDSO_RANDOMIZE_SIZE;
+	}
 
 	/* Space for cache colour alignment */
 	if (cpu_has_dc_aliases)
 		top -= shm_align_mask + 1;
 
-	/* Space to randomize the VDSO base */
-	if (current->flags & PF_RANDOMIZE)
-		top -= VDSO_RANDOMIZE_SIZE;
-
 	return top;
 }
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 61503a36067e..3f4c94c88124 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -922,58 +922,60 @@ static const struct pt_regs_offset regoffset_table[] = {
  */
 int regs_query_register_offset(const char *name)
 {
-	const struct pt_regs_offset *roff;
-	for (roff = regoffset_table; roff->name != NULL; roff++)
-		if (!strcmp(roff->name, name))
-			return roff->offset;
-	return -EINVAL;
+	const struct pt_regs_offset *roff;
+
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+
+	return -EINVAL;
 }
 
 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 
 static const struct user_regset mips_regsets[] = {
 	[REGSET_GPR] = {
-		.core_note_type	= NT_PRSTATUS,
+		USER_REGSET_NOTE_TYPE(PRSTATUS),
 		.n		= ELF_NGREG,
 		.size		= sizeof(unsigned int),
 		.align		= sizeof(unsigned int),
-		.regset_get		= gpr32_get,
+		.regset_get	= gpr32_get,
 		.set		= gpr32_set,
 	},
 	[REGSET_DSP] = {
-		.core_note_type	= NT_MIPS_DSP,
+		USER_REGSET_NOTE_TYPE(MIPS_DSP),
 		.n		= NUM_DSP_REGS + 1,
 		.size		= sizeof(u32),
 		.align		= sizeof(u32),
-		.regset_get		= dsp32_get,
+		.regset_get	= dsp32_get,
 		.set		= dsp32_set,
 		.active		= dsp_active,
 	},
 #ifdef CONFIG_MIPS_FP_SUPPORT
 	[REGSET_FPR] = {
-		.core_note_type	= NT_PRFPREG,
+		USER_REGSET_NOTE_TYPE(PRFPREG),
 		.n		= ELF_NFPREG,
 		.size		= sizeof(elf_fpreg_t),
 		.align		= sizeof(elf_fpreg_t),
-		.regset_get		= fpr_get,
+		.regset_get	= fpr_get,
 		.set		= fpr_set,
 	},
 	[REGSET_FP_MODE] = {
-		.core_note_type	= NT_MIPS_FP_MODE,
+		USER_REGSET_NOTE_TYPE(MIPS_FP_MODE),
 		.n		= 1,
 		.size		= sizeof(int),
 		.align		= sizeof(int),
-		.regset_get		= fp_mode_get,
+		.regset_get	= fp_mode_get,
 		.set		= fp_mode_set,
 	},
 #endif
 #ifdef CONFIG_CPU_HAS_MSA
 	[REGSET_MSA] = {
-		.core_note_type	= NT_MIPS_MSA,
+		USER_REGSET_NOTE_TYPE(MIPS_MSA),
 		.n		= NUM_FPU_REGS + 1,
 		.size		= 16,
 		.align		= 16,
-		.regset_get		= msa_get,
+		.regset_get	= msa_get,
 		.set		= msa_set,
 	},
 #endif
@@ -993,47 +995,47 @@ static const struct user_regset_view user_mips_view = {
 
 static const struct user_regset mips64_regsets[] = {
 	[REGSET_GPR] = {
-		.core_note_type	= NT_PRSTATUS,
+		USER_REGSET_NOTE_TYPE(PRSTATUS),
 		.n		= ELF_NGREG,
 		.size		= sizeof(unsigned long),
 		.align		= sizeof(unsigned long),
-		.regset_get		= gpr64_get,
+		.regset_get	= gpr64_get,
 		.set		= gpr64_set,
 	},
 	[REGSET_DSP] = {
-		.core_note_type	= NT_MIPS_DSP,
+		USER_REGSET_NOTE_TYPE(MIPS_DSP),
 		.n		= NUM_DSP_REGS + 1,
 		.size		= sizeof(u64),
 		.align		= sizeof(u64),
-		.regset_get		= dsp64_get,
+		.regset_get	= dsp64_get,
 		.set		= dsp64_set,
 		.active		= dsp_active,
 	},
 #ifdef CONFIG_MIPS_FP_SUPPORT
 	[REGSET_FP_MODE] = {
-		.core_note_type	= NT_MIPS_FP_MODE,
+		USER_REGSET_NOTE_TYPE(MIPS_FP_MODE),
 		.n		= 1,
 		.size		= sizeof(int),
 		.align		= sizeof(int),
-		.regset_get		= fp_mode_get,
+		.regset_get	= fp_mode_get,
 		.set		= fp_mode_set,
 	},
 	[REGSET_FPR] = {
-		.core_note_type	= NT_PRFPREG,
+		USER_REGSET_NOTE_TYPE(PRFPREG),
 		.n		= ELF_NFPREG,
 		.size		= sizeof(elf_fpreg_t),
 		.align		= sizeof(elf_fpreg_t),
-		.regset_get		= fpr_get,
+		.regset_get	= fpr_get,
 		.set		= fpr_set,
 	},
 #endif
 #ifdef CONFIG_CPU_HAS_MSA
 	[REGSET_MSA] = {
-		.core_note_type	= NT_MIPS_MSA,
+		USER_REGSET_NOTE_TYPE(MIPS_MSA),
 		.n		= NUM_FPU_REGS + 1,
 		.size		= 16,
 		.align		= 16,
-		.regset_get		= msa_get,
+		.regset_get	= msa_get,
 		.set		= msa_set,
 	},
 #endif
@@ -1326,24 +1328,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs)
 			return -1;
 	}
 
-#ifdef CONFIG_SECCOMP
-	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
-		int ret, i;
-		struct seccomp_data sd;
-		unsigned long args[6];
-
-		sd.nr = current_thread_info()->syscall;
-		sd.arch = syscall_get_arch(current);
-		syscall_get_arguments(current, regs, args);
-		for (i = 0; i < 6; i++)
-			sd.args[i] = args[i];
-		sd.instruction_pointer = KSTK_EIP(current);
-
-		ret = __secure_computing(&sd);
-		if (ret == -1)
-			return ret;
-	}
-#endif
+	if (secure_computing())
+		return -1;
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->regs[2]);
@@ -1367,7 +1353,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs)
  */
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
-        /*
+	/*
 	 * We may come here right after calling schedule_user()
 	 * or do_notify_resume(), in which case we can be in RCU
 	 * user mode.
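For context, the generic helper that replaces the open-coded seccomp block above builds the seccomp_data itself from the current registers; its shape is roughly (a sketch of the common pattern, not the exact kernel source):

	static inline int secure_computing_sketch(void)
	{
		if (unlikely(test_thread_flag(TIF_SECCOMP)))
			return __secure_computing();	/* -1 means deny the syscall */
		return 0;
	}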
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index cda7983e7c18..7f1c136ad850 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -138,7 +138,7 @@ static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new,
 		apply_r_mips_hi16_rel(loc_orig, loc_new, offset);
 		break;
 	default:
-		pr_err("Unhandled relocation type %d at 0x%pK\n", type,
+		pr_err("Unhandled relocation type %d at 0x%p\n", type,
 		       loc_orig);
 		return -ENOEXEC;
 	}
@@ -439,10 +439,10 @@ static void show_kernel_relocation(const char *level)
 {
 	if (__kaslr_offset > 0) {
 		printk(level);
-		pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset);
-		pr_cont(" .text @ 0x%pK\n", _text);
-		pr_cont(" .data @ 0x%pK\n", _sdata);
-		pr_cont(" .bss @ 0x%pK\n", __bss_start);
+		pr_cont("Kernel relocated by 0x%p\n", (void *)__kaslr_offset);
+		pr_cont(" .text @ 0x%p\n", _text);
+		pr_cont(" .data @ 0x%p\n", _sdata);
+		pr_cont(" .bss @ 0x%p\n", __bss_start);
 	}
 }
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2c604717e630..4947a4f39e37 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -64,10 +64,10 @@ load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
 load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
 loads_done:
 
-	sw	t5, 16(sp)		# argument #5 to ksp
-	sw	t6, 20(sp)		# argument #6 to ksp
-	sw	t7, 24(sp)		# argument #7 to ksp
-	sw	t8, 28(sp)		# argument #8 to ksp
+	sw	t5, PT_ARG4(sp)		# argument #5 to ksp
+	sw	t6, PT_ARG5(sp)		# argument #6 to ksp
+	sw	t7, PT_ARG6(sp)		# argument #7 to ksp
+	sw	t8, PT_ARG7(sp)		# argument #8 to ksp
 	.set	pop
 
 	.section __ex_table,"a"
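The scall32-o32.S change above, together with the asm-offsets.c hunk earlier, ties the o32 stack arguments to struct pt_regs by construction instead of hard-coded offsets. The C-side view is simply (illustrative helper, not part of the patch):

	static inline unsigned long o32_stack_arg_sketch(struct pt_regs *regs, int n)
	{
		/* n = 4..7; arguments 1-4 arrive in registers a0-a3 */
		return regs->args[n];
	}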
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 12a1a4ffb602..11b9b6b63e19 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -458,7 +458,7 @@ static void __init mips_parse_crashkernel(void)
 	total_mem = memblock_phys_mem_size();
 	ret = parse_crashkernel(boot_command_line, total_mem,
 				&crash_size, &crash_base,
-				NULL, NULL);
+				NULL, NULL, NULL);
 	if (ret != 0 || crash_size <= 0)
 		return;
 
@@ -704,10 +704,7 @@ static void __init resource_init(void)
 	for_each_mem_range(i, &start, &end) {
 		struct resource *res;
 
-		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
-		if (!res)
-			panic("%s: Failed to allocate %zu bytes\n", __func__,
-			      sizeof(struct resource));
+		res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES);
 		res->start = start;
 
 		/*
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 82c8f9b9573c..22d4f9ff3ae2 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -36,11 +36,55 @@ enum label_id {
 
 UASM_L_LA(_not_nmi)
 
-static DECLARE_BITMAP(core_power, NR_CPUS);
 static u64 core_entry_reg;
 static phys_addr_t cps_vec_pa;
 
-struct core_boot_config *mips_cps_core_bootcfg;
+struct cluster_boot_config *mips_cps_cluster_bootcfg;
+
+static void power_up_other_cluster(unsigned int cluster)
+{
+	u32 stat, seq_state;
+	unsigned int timeout;
+
+	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	stat = read_cpc_co_stat_conf();
+	mips_cm_unlock_other();
+
+	seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+	seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+	if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+		return;
+
+	/* Set endianness & power up the CM */
+	mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+	write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
+	write_cpc_redir_pwrup_ctl(1);
+	mips_cm_unlock_other();
+
+	/* Wait for the CM to start up */
+	timeout = 1000;
+	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	while (1) {
+		stat = read_cpc_co_stat_conf();
+		seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+		seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+		if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+			break;
+
+		if (timeout) {
+			mdelay(1);
+			timeout--;
+		} else {
+			pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
+				cluster, stat);
+			mdelay(1000);
+		}
+	}
+
+	mips_cm_unlock_other();
+}
 
 static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
 {
@@ -178,6 +222,9 @@ static void __init cps_smp_setup(void)
 			pr_cont(",");
 		pr_cont("{");
 
+		if (mips_cm_revision() >= CM_REV_CM3_5)
+			power_up_other_cluster(cl);
+
 		ncores = mips_cps_numcores(cl);
 		for (c = 0; c < ncores; c++) {
 			core_vpes = core_vpe_count(cl, c);
@@ -189,6 +236,7 @@ static void __init cps_smp_setup(void)
 			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
 			if (!cl && !c)
 				smp_num_siblings = core_vpes;
+			cpumask_set_cpu(nvpes, &__cpu_primary_thread_mask);
 
 			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
 				cpu_set_cluster(&cpu_data[nvpes + v], cl);
@@ -205,8 +253,8 @@ static void __init cps_smp_setup(void)
 
 	/* Indicate present CPUs (CPU being synonymous with VPE) */
 	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
-		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
-		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+		set_cpu_possible(v, true);
+		set_cpu_present(v, true);
 		__cpu_number_map[v] = v;
 		__cpu_logical_map[v] = v;
 	}
@@ -214,9 +262,6 @@ static void __init cps_smp_setup(void)
 	/* Set a coherent default CCA (CWB) */
 	change_c0_config(CONF_CM_CMASK, 0x5);
 
-	/* Core 0 is powered up (we're running on it) */
-	bitmap_set(core_power, 0, 1);
-
 	/* Initialise core 0 */
 	mips_cps_core_init();
 
@@ -236,10 +281,23 @@ static void __init cps_smp_setup(void)
 #endif /* CONFIG_MIPS_MT_FPAFF */
 }
 
+unsigned long calibrate_delay_is_known(void)
+{
+	int first_cpu_cluster = 0;
+
+	/* The calibration has to be done on the primary CPU of the cluster */
+	if (mips_cps_first_online_in_cluster(&first_cpu_cluster))
+		return 0;
+
+	return cpu_data[first_cpu_cluster].udelay_val;
+}
+
 static void __init cps_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned ncores, core_vpes, c, cca;
+	unsigned int nclusters, ncores, core_vpes, nvpe = 0, c, cl, cca;
 	bool cca_unsuitable, cores_limited;
+	struct cluster_boot_config *cluster_bootcfg;
+	struct core_boot_config *core_bootcfg;
 
 	mips_mt_set_cpuoptions();
 
@@ -281,40 +339,70 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 
 	setup_cps_vecs();
 
-	/* Allocate core boot configuration structs */
-	ncores = mips_cps_numcores(0);
-	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
-					GFP_KERNEL);
-	if (!mips_cps_core_bootcfg) {
-		pr_err("Failed to allocate boot config for %u cores\n", ncores);
+	/* Allocate cluster boot configuration structs */
+	nclusters = mips_cps_numclusters();
+	mips_cps_cluster_bootcfg = kcalloc(nclusters,
+					   sizeof(*mips_cps_cluster_bootcfg),
+					   GFP_KERNEL);
+	if (!mips_cps_cluster_bootcfg)
 		goto err_out;
-	}
 
-	/* Allocate VPE boot configuration structs */
-	for (c = 0; c < ncores; c++) {
-		core_vpes = core_vpe_count(0, c);
-		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
-				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+	if (nclusters > 1)
+		mips_cm_update_property();
+
+	for (cl = 0; cl < nclusters; cl++) {
+		/* Allocate core boot configuration structs */
+		ncores = mips_cps_numcores(cl);
+		core_bootcfg = kcalloc(ncores, sizeof(*core_bootcfg),
+				       GFP_KERNEL);
+		if (!core_bootcfg)
+			goto err_out;
+		mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
+
+		mips_cps_cluster_bootcfg[cl].core_power =
+			kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
 				GFP_KERNEL);
-		if (!mips_cps_core_bootcfg[c].vpe_config) {
-			pr_err("Failed to allocate %u VPE boot configs\n",
-			       core_vpes);
+		if (!mips_cps_cluster_bootcfg[cl].core_power)
 			goto err_out;
+
+		/* Allocate VPE boot configuration structs */
+		for (c = 0; c < ncores; c++) {
+			int v;
+
+			core_vpes = core_vpe_count(cl, c);
+			core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+					sizeof(*core_bootcfg[c].vpe_config),
+					GFP_KERNEL);
+			for (v = 0; v < core_vpes; v++)
+				cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask);
+			if (!core_bootcfg[c].vpe_config)
+				goto err_out;
 		}
 	}
 
-	/* Mark this CPU as booted */
-	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
-		   1 << cpu_vpe_id(&current_cpu_data));
+	/* Mark this CPU as powered up & booted */
+	cl = cpu_cluster(&current_cpu_data);
+	c = cpu_core(&current_cpu_data);
+	cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+	cpu_smt_set_num_threads(core_vpes, core_vpes);
+	core_bootcfg = &cluster_bootcfg->core_config[c];
+	bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
+	atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
 
 	return;
 err_out:
 	/* Clean up allocations */
-	if (mips_cps_core_bootcfg) {
-		for (c = 0; c < ncores; c++)
-			kfree(mips_cps_core_bootcfg[c].vpe_config);
-		kfree(mips_cps_core_bootcfg);
-		mips_cps_core_bootcfg = NULL;
+	if (mips_cps_cluster_bootcfg) {
+		for (cl = 0; cl < nclusters; cl++) {
+			cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+			ncores = mips_cps_numcores(cl);
+			for (c = 0; c < ncores; c++) {
+				core_bootcfg = &cluster_bootcfg->core_config[c];
+				kfree(core_bootcfg->vpe_config);
+			}
+			kfree(mips_cps_cluster_bootcfg[c].core_config);
+		}
+		kfree(mips_cps_cluster_bootcfg);
+		mips_cps_cluster_bootcfg = NULL;
 	}
 
 	/* Effectively disable SMP by declaring CPUs not present */
@@ -325,13 +413,118 @@ err_out:
 	}
 }
 
-static void boot_core(unsigned int core, unsigned int vpe_id)
+static void init_cluster_l2(void)
 {
-	u32 stat, seq_state;
-	unsigned timeout;
+	u32 l2_cfg, l2sm_cop, result;
+
+	while (!mips_cm_is_l2_hci_broken) {
+		l2_cfg = read_gcr_redir_l2_ram_config();
+
+		/* If HCI is not supported, use the state machine below */
+		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
+			break;
+		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
+			break;
+
+		/* If the HCI_DONE bit is set, we're finished */
+		if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
+			return;
+	}
+
+	l2sm_cop = read_gcr_redir_l2sm_cop();
+	if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
+		 "L2 init not supported on this system yet"))
+		return;
+
+	/* Clear L2 tag registers */
+	write_gcr_redir_l2_tag_state(0);
+	write_gcr_redir_l2_ecc(0);
+
+	/* Ensure the L2 tag writes complete before the state machine starts */
+	mb();
+
+	/* Wait for the L2 state machine to be idle */
+	do {
+		l2sm_cop = read_gcr_redir_l2sm_cop();
+	} while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
+
+	/* Start a store tag operation */
+	l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
+	l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
+	l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
+	write_gcr_redir_l2sm_cop(l2sm_cop);
+
+	/* Ensure the state machine starts before we poll for completion */
+	mb();
+
+	/* Wait for the operation to be complete */
+	do {
+		l2sm_cop = read_gcr_redir_l2sm_cop();
+		result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
+		result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
+	} while (!result);
+
+	WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
+	     "L2 state machine failed cache init with error %u\n", result);
+}
+
+static void boot_core(unsigned int cluster, unsigned int core,
+		      unsigned int vpe_id)
+{
+	struct cluster_boot_config *cluster_cfg;
+	u32 access, stat, seq_state;
+	unsigned int timeout, ncores;
+
+	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+	ncores = mips_cps_numcores(cluster);
+
+	if ((cluster != cpu_cluster(&current_cpu_data)) &&
+	    bitmap_empty(cluster_cfg->core_power, ncores)) {
+		power_up_other_cluster(cluster);
+
+		mips_cm_lock_other(cluster, core, 0,
+				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+		/* Ensure cluster GCRs are where we expect */
+		write_gcr_redir_base(read_gcr_base());
+		write_gcr_redir_cpc_base(read_gcr_cpc_base());
+		write_gcr_redir_gic_base(read_gcr_gic_base());
+
+		init_cluster_l2();
+
+		/* Mirror L2 configuration */
+		write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
+		write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
+		write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
+
+		/* Mirror ECC/parity setup */
+		write_gcr_redir_err_control(read_gcr_err_control());
+
+		/* Set BEV base */
+		write_gcr_redir_bev_base(core_entry_reg);
+
+		mips_cm_unlock_other();
+	}
+
+	if (cluster != cpu_cluster(&current_cpu_data)) {
+		mips_cm_lock_other(cluster, core, 0,
+				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+		/* Ensure the core can access the GCRs */
+		access = read_gcr_redir_access();
+		access |= BIT(core);
+		write_gcr_redir_access(access);
+
+		mips_cm_unlock_other();
+	} else {
+		/* Ensure the core can access the GCRs */
+		access = read_gcr_access();
+		access |= BIT(core);
+		write_gcr_access(access);
+	}
 
 	/* Select the appropriate core */
-	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 	/* Set its reset vector */
 	if (mips_cm_is64)
@@ -400,30 +593,42 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
 	mips_cm_unlock_other();
 
 	/* The core is now powered up */
-	bitmap_set(core_power, core, 1);
+	bitmap_set(cluster_cfg->core_power, core, 1);
+
+	/*
+	 * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
+	 * the cluster do (eg. if they're all removed via hotplug.
+	 */
+	if (mips_cm_revision() >= CM_REV_CM3_5) {
+		mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+		write_cpc_redir_pwrup_ctl(0);
+		mips_cm_unlock_other();
+	}
 }
 
 static void remote_vpe_boot(void *dummy)
 {
+	unsigned int cluster = cpu_cluster(&current_cpu_data);
 	unsigned core = cpu_core(&current_cpu_data);
-	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+	struct cluster_boot_config *cluster_cfg =
+		&mips_cps_cluster_bootcfg[cluster];
+	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
 
 	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
 }
 
 static int cps_boot_secondary(int cpu, struct task_struct *idle)
 {
+	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
 	unsigned core = cpu_core(&cpu_data[cpu]);
 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
-	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+	struct cluster_boot_config *cluster_cfg =
+		&mips_cps_cluster_bootcfg[cluster];
+	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
 	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
 	unsigned int remote;
 	int err;
 
-	/* We don't yet support booting CPUs in other clusters */
-	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
-		return -ENOSYS;
-
 	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
 	vpe_cfg->sp = __KSTK_TOS(idle);
 	vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -432,14 +637,15 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 
 	preempt_disable();
 
-	if (!test_bit(core, core_power)) {
+	if (!test_bit(core, cluster_cfg->core_power)) {
 		/* Boot a VPE on a powered down core */
-		boot_core(core, vpe_id);
+		boot_core(cluster, core, vpe_id);
 		goto out;
 	}
 
 	if (cpu_has_vp) {
-		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+		mips_cm_lock_other(cluster, core, vpe_id,
+				   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 		if (mips_cm_is64)
 			write_gcr_co_reset64_base(core_entry_reg);
 		else
@@ -576,12 +782,14 @@ static void cps_kexec_nonboot_cpu(void)
 static int cps_cpu_disable(void)
 {
 	unsigned cpu = smp_processor_id();
+	struct cluster_boot_config *cluster_cfg;
 	struct core_boot_config *core_cfg;
 
 	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
 		return -EINVAL;
 
-	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
+	cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(&current_cpu_data)];
+	core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)];
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
 	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
@@ -647,11 +855,15 @@ static void cps_cpu_die(unsigned int cpu) { }
 
 static void cps_cleanup_dead_cpu(unsigned cpu)
 {
+	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
 	unsigned core = cpu_core(&cpu_data[cpu]);
 	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	ktime_t fail_time;
 	unsigned stat;
 	int err;
+	struct cluster_boot_config *cluster_cfg;
+
+	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
 
 	/*
 	 * Now wait for the CPU to actually offline. Without doing this that
@@ -703,7 +915,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu)
 		} while (1);
 
 		/* Indicate the core is powered off */
-		bitmap_clear(core_power, core, 1);
+		bitmap_clear(cluster_cfg->core_power, core, 1);
 	} else if (cpu_has_mipsmt) {
 		/*
 		 * Have a CPU with access to the offlined CPUs registers wait
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 39e193cad2b9..4868e79f3b30 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -56,8 +56,10 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+#ifndef CONFIG_HOTPLUG_PARALLEL
 static DECLARE_COMPLETION(cpu_starting);
 static DECLARE_COMPLETION(cpu_running);
+#endif
 
 /*
  * A logical cpu mask containing only one VPE per core to
@@ -74,6 +76,8 @@ static cpumask_t cpu_core_setup_map;
 
 cpumask_t cpu_coherent_mask;
 
+struct cpumask __cpu_primary_thread_mask __read_mostly;
+
 unsigned int smp_max_threads __initdata = UINT_MAX;
 
 static int __init early_nosmt(char *s)
@@ -367,6 +371,9 @@ asmlinkage void start_secondary(void)
 	 * to an option instead of something based on .cputype
 	 */
 
+#ifdef CONFIG_HOTPLUG_PARALLEL
+	cpuhp_ap_sync_alive();
+#endif
 	calibrate_delay();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
@@ -376,8 +383,10 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
+#ifndef CONFIG_HOTPLUG_PARALLEL
 	/* Notify boot CPU that we're starting & ready to sync counters */
 	complete(&cpu_starting);
+#endif
 
 	synchronise_count_slave(cpu);
 
@@ -386,11 +395,13 @@ asmlinkage void start_secondary(void)
 
 	calculate_cpu_foreign_map();
 
+#ifndef CONFIG_HOTPLUG_PARALLEL
 	/*
 	 * Notify boot CPU that we're up & online and it can safely return
 	 * from __cpu_up
 	 */
 	complete(&cpu_running);
+#endif
 
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
@@ -447,6 +458,12 @@ void __init smp_prepare_boot_cpu(void)
 	set_cpu_online(0, true);
 }
 
+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+	return mp_ops->boot_secondary(cpu, tidle);
+}
+#else
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int err;
@@ -466,6 +483,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	wait_for_completion(&cpu_running);
 	return 0;
 }
+#endif
 
 #ifdef CONFIG_PROFILING
 /* Not really SMP stuff ... */
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 71c7e5e27567..dd31e3fffd24 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -26,10 +26,6 @@
 
 #define ERRCTL_SPRAM		(1 << 28)
 
-/* errctl access */
-#define read_c0_errctl(x) read_c0_ecc(x)
-#define write_c0_errctl(x) write_c0_ecc(x)
-
 /*
  * Different semantics to the set_c0_* function built by __BUILD_SET_C0
  */
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 0b9b7e25b69a..8cedc83c3266 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -405,3 +405,7 @@
 464	n32	getxattrat			sys_getxattrat
 465	n32	listxattrat			sys_listxattrat
 466	n32	removexattrat			sys_removexattrat
+467	n32	open_tree_attr			sys_open_tree_attr
+468	n32	file_getattr			sys_file_getattr
+469	n32	file_setattr			sys_file_setattr
+470	n32	listns				sys_listns
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index c844cd5cda62..9b92bddf06b5 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -381,3 +381,7 @@
 464	n64	getxattrat			sys_getxattrat
 465	n64	listxattrat			sys_listxattrat
 466	n64	removexattrat			sys_removexattrat
+467	n64	open_tree_attr			sys_open_tree_attr
+468	n64	file_getattr			sys_file_getattr
+469	n64	file_setattr			sys_file_setattr
+470	n64	listns				sys_listns
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 349b8aad1159..f810b8a55716 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -454,3 +454,7 @@
 464	o32	getxattrat			sys_getxattrat
 465	o32	listxattrat			sys_listxattrat
 466	o32	removexattrat			sys_removexattrat
+467	o32	open_tree_attr			sys_open_tree_attr
+468	o32	file_getattr			sys_file_getattr
+469	o32	file_setattr			sys_file_setattr
+470	o32	listns				sys_listns
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index dc29bd9656b0..8ec1e185b35c 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <linux/kdb.h>
 #include <linux/irq.h>
 #include <linux/perf_event.h>
+#include <linux/string_choices.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -76,7 +77,7 @@
 #include "access-helper.h"
 
 extern void check_wait(void);
-extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void skipover_handle_int(void);
 extern asmlinkage void handle_int(void);
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
@@ -1705,10 +1706,10 @@ static inline __init void parity_protection_init(void)
 		l2parity &= l1parity;
 
 		/* Probe L1 ECC support */
-		cp0_ectl = read_c0_ecc();
-		write_c0_ecc(cp0_ectl | ERRCTL_PE);
+		cp0_ectl = read_c0_errctl();
+		write_c0_errctl(cp0_ectl | ERRCTL_PE);
 		back_to_back_c0_hazard();
-		cp0_ectl = read_c0_ecc();
+		cp0_ectl = read_c0_errctl();
 
 		/* Probe L2 ECC support */
 		gcr_ectl = read_gcr_err_control();
@@ -1727,9 +1728,9 @@ static inline __init void parity_protection_init(void)
 			cp0_ectl |= ERRCTL_PE;
 		else
 			cp0_ectl &= ~ERRCTL_PE;
-		write_c0_ecc(cp0_ectl);
+		write_c0_errctl(cp0_ectl);
 		back_to_back_c0_hazard();
-		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
+		WARN_ON(!!(read_c0_errctl() & ERRCTL_PE) != l1parity);
 
 		/* Configure L2 ECC checking */
 		if (l2parity)
@@ -1741,8 +1742,8 @@ static inline __init void parity_protection_init(void)
 		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
 		WARN_ON(!!gcr_ectl != l2parity);
 
-		pr_info("Cache parity protection %sabled\n",
-			l1parity ? "en" : "dis");
+		pr_info("Cache parity protection %s\n",
+			str_enabled_disabled(l1parity));
 		return;
 	}
 
@@ -1761,18 +1762,18 @@ static inline __init void parity_protection_init(void)
 		unsigned long errctl;
 		unsigned int l1parity_present, l2parity_present;
 
-		errctl = read_c0_ecc();
+		errctl = read_c0_errctl();
 		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
 
 		/* probe L1 parity support */
-		write_c0_ecc(errctl | ERRCTL_PE);
+		write_c0_errctl(errctl | ERRCTL_PE);
 		back_to_back_c0_hazard();
-		l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+		l1parity_present = (read_c0_errctl() & ERRCTL_PE);
 
 		/* probe L2 parity support */
-		write_c0_ecc(errctl|ERRCTL_L2P);
+		write_c0_errctl(errctl|ERRCTL_L2P);
 		back_to_back_c0_hazard();
-		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+		l2parity_present = (read_c0_errctl() & ERRCTL_L2P);
 
 		if (l1parity_present && l2parity_present) {
 			if (l1parity)
@@ -1791,20 +1792,20 @@ static inline __init void parity_protection_init(void)
 
 		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
 
-		write_c0_ecc(errctl);
+		write_c0_errctl(errctl);
 		back_to_back_c0_hazard();
-		errctl = read_c0_ecc();
+		errctl = read_c0_errctl();
 		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
 
 		if (l1parity_present)
-			printk(KERN_INFO "Cache parity protection %sabled\n",
-			       (errctl & ERRCTL_PE) ? "en" : "dis");
+			pr_info("Cache parity protection %s\n",
+				str_enabled_disabled(errctl & ERRCTL_PE));
 
 		if (l2parity_present) {
 			if (l1parity_present && l1parity)
 				errctl ^= ERRCTL_L2P;
-			printk(KERN_INFO "L2 cache parity protection %sabled\n",
-			       (errctl & ERRCTL_L2P) ? "en" : "dis");
+			pr_info("L2 cache parity protection %s\n",
+				str_enabled_disabled(errctl & ERRCTL_L2P));
 		}
 	}
 	break;
@@ -1812,11 +1813,11 @@ static inline __init void parity_protection_init(void)
 	case CPU_5KC:
 	case CPU_5KE:
 	case CPU_LOONGSON32:
-		write_c0_ecc(0x80000000);
+		write_c0_errctl(0x80000000);
 		back_to_back_c0_hazard();
 		/* Set the PE bit (bit 31) in the c0_errctl register. */
-		printk(KERN_INFO "Cache parity protection %sabled\n",
-		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
+		pr_info("Cache parity protection %s\n",
+			str_enabled_disabled(read_c0_errctl() & 0x80000000));
 		break;
 	case CPU_20KC:
 	case CPU_25KF:
@@ -1887,8 +1888,8 @@ asmlinkage void do_ftlb(void)
 	if ((cpu_has_mips_r2_r6) &&
 	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
-		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
-		       read_c0_ecc());
+		pr_err("FTLB error exception, cp0_errctl=0x%08x:\n",
+		       read_c0_errctl());
 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
 		reg_val = read_c0_cacheerr();
 		pr_err("c0_cacheerr == %08x\n", reg_val);
@@ -2065,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
 {
 	extern const u8 except_vec_vi[];
 	extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
-	extern const u8 rollback_except_vec_vi[];
+	extern const u8 skipover_except_vec_vi[];
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
 	int srssets = current_cpu_data.srsets;
@@ -2094,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
 		change_c0_srsmap(0xf << n*4, 0 << n*4);
 	}
 
-	vec_start = using_rollback_handler() ? rollback_except_vec_vi :
+	vec_start = using_skipover_handler() ? skipover_except_vec_vi :
 					       except_vec_vi;
 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
 	ori_offset = except_vec_vi_ori - vec_start + 2;
@@ -2425,8 +2426,8 @@ void __init trap_init(void)
 	if (board_be_init)
 		board_be_init();
 
-	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
-					rollback_handle_int : handle_int);
+	set_except_vector(EXCCODE_INT, using_skipover_handler() ?
+					skipover_handle_int : handle_int);
 	set_except_vector(EXCCODE_MOD, handle_tlbm);
 	set_except_vector(EXCCODE_TLBL, handle_tlbl);
 	set_except_vector(EXCCODE_TLBS, handle_tlbs);
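str_enabled_disabled() comes from <linux/string_choices.h> and is essentially the following, which replaces the old "%sabled" format trick:

	static inline const char *str_enabled_disabled(bool v)
	{
		return v ? "enabled" : "disabled";
	}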
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 4c8e3c0aa210..de096777172f 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -11,9 +11,11 @@
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/mman.h>
 #include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/vdso_datastore.h>
 
 #include <asm/abi.h>
 #include <asm/mips-cps.h>
@@ -22,20 +24,7 @@
 #include <vdso/helpers.h>
 #include <vdso/vsyscall.h>
 
-/* Kernel-provided data used by the VDSO. */
-static union vdso_data_store mips_vdso_data __page_aligned_data;
-struct vdso_data *vdso_data = mips_vdso_data.data;
-
-/*
- * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
- * what we map and where within the area they are mapped is determined at
- * runtime.
- */
-static struct page *no_pages[] = { NULL };
-static struct vm_special_mapping vdso_vvar_mapping = {
-	.name = "[vvar]",
-	.pages = no_pages,
-};
+static_assert(VDSO_NR_PAGES == __VDSO_PAGES);
 
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
@@ -89,7 +78,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mips_vdso_image *image = current->thread.abi->vdso;
 	struct mm_struct *mm = current->mm;
-	unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
+	unsigned long gic_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
 	struct vm_area_struct *vma;
 	int ret;
 
@@ -97,11 +86,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		return -EINTR;
 
 	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+		unsigned long unused;
+
 		/* Map delay slot emulation page */
-		base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
-				   VM_READ | VM_EXEC |
-				   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
-				   0, NULL);
+		base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC,
+			       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0, &unused,
+			       NULL);
 		if (IS_ERR_VALUE(base)) {
 			ret = base;
 			goto out;
@@ -117,8 +107,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * the counter registers at the start.
 	 */
 	gic_size = mips_gic_present() ? PAGE_SIZE : 0;
-	vvar_size = gic_size + PAGE_SIZE;
-	size = vvar_size + image->size;
+	size = gic_size + VDSO_NR_PAGES * PAGE_SIZE + image->size;
 
 	/*
 	 * Find a region that's large enough for us to perform the
@@ -141,15 +130,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 */
 	if (cpu_has_dc_aliases) {
 		base = __ALIGN_MASK(base, shm_align_mask);
-		base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
+		base += ((unsigned long)vdso_k_time_data - gic_size) & shm_align_mask;
 	}
 
 	data_addr = base + gic_size;
-	vdso_addr = data_addr + PAGE_SIZE;
+	vdso_addr = data_addr + VDSO_NR_PAGES * PAGE_SIZE;
 
-	vma = _install_special_mapping(mm, base, vvar_size,
-				       VM_READ | VM_MAYREAD,
-				       &vdso_vvar_mapping);
+	vma = vdso_install_vvar_mapping(mm, data_addr);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;
 	}
 
 	/* Map GIC user page. */
 	if (gic_size) {
 		gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
 		gic_pfn = PFN_DOWN(__pa(gic_base));
+		static const struct vm_special_mapping gic_mapping = {
+			.name = "[gic]",
+			.pages = (struct page **) { NULL },
+		};
+
+		vma = _install_special_mapping(mm, base, gic_size, VM_READ | VM_MAYREAD,
+					       &gic_mapping);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			goto out;
+		}
 
 		ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
 					 pgprot_noncached(vma->vm_page_prot));
@@ -166,13 +164,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 			goto out;
 	}
 
-	/* Map data page. */
-	ret = remap_pfn_range(vma, data_addr,
-			      virt_to_phys(vdso_data) >> PAGE_SHIFT,
-			      PAGE_SIZE, vma->vm_page_prot);
-	if (ret)
-		goto out;
-
 	/* Map VDSO image. */
 	vma = _install_special_mapping(mm, vdso_addr, image->size,
 				       VM_READ | VM_EXEC |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9ff55cb80a64..2b708fac8d2c 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -61,6 +61,7 @@ SECTIONS
 	/* read-only */
 	_text = .;	/* Text and read-only data */
 	.text : {
+		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 737d0d4fdcd3..2b67c44adab9 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/elf.h>
 #include <linux/seq_file.h>
+#include <linux/string.h>
 #include <linux/syscalls.h>
 #include <linux/moduleloader.h>
 #include <linux/interrupt.h>
@@ -582,7 +583,7 @@ static int vpe_elfload(struct vpe *v)
 	struct module mod;	/* so we can re-use the relocations code */
 
 	memset(&mod, 0, sizeof(struct module));
-	strcpy(mod.name, "VPE loader");
+	strscpy(mod.name, "VPE loader");
 
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	len = v->plen;
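The strcpy() -> strscpy() conversion in vpe.c relies on the two-argument form of strscpy(), which infers the destination size from the array type and always NUL-terminates. For reference:

	char buf[16];

	strscpy(buf, "VPE loader");	/* same as strscpy(buf, "VPE loader", sizeof(buf)) */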
