author     Linus Torvalds <torvalds@linux-foundation.org>    2014-08-07 08:47:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-08-07 08:47:00 -0700
commit     e669830526a0abaf301bf408df69cde33901ac63 (patch)
tree       0b6043375006d1754bbd1ab2370b0a0536546cc9 /arch/mips/kernel
parent     ebb067d2f4e2db59b076f9c9cba0375a8ad1e07c (diff)
parent     475d5928b79bb78326a645863d46ff95c5e25e5a (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
 "This is the main pull request for 3.17.  It contains:

   - misc Cavium Octeon, BCM47xx, BCM63xx and Alchemy updates
   - MIPS ptrace updates and cleanups
   - various fixes that will also go to -stable
   - a number of cleanups and small non-critical fixes
   - NUMA support for the Loongson 3
   - more support for MSA
   - support for MAAR
   - various FP enhancements and fixes"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (139 commits)
  MIPS: jz4740: remove unnecessary null test before debugfs_remove
  MIPS: Octeon: remove unnecessary null test before debugfs_remove_recursive
  MIPS: ZBOOT: implement stack protector in compressed boot phase
  MIPS: mipsreg: remove duplicate MIPS_CONF4_FTLBSETS_SHIFT
  MIPS: Bonito64: remove a duplicate define
  MIPS: Malta: initialise MAARs
  MIPS: Initialise MAARs
  MIPS: detect presence of MAARs
  MIPS: define MAAR register accessors & bits
  MIPS: mark MSA experimental
  MIPS: Don't build MSA support unless it can be used
  MIPS: consistently clear MSA flags when starting & copying threads
  MIPS: 16 byte align MSA vector context
  MIPS: disable preemption whilst initialising MSA
  MIPS: ensure MSA gets disabled during boot
  MIPS: fix read_msa_* & write_msa_* functions on non-MSA toolchains
  MIPS: fix MSA context for tasks which don't use FP first
  MIPS: init upper 64b of vector registers when MSA is first used
  MIPS: save/disable MSA in lose_fpu
  MIPS: preserve scalar FP CSR when switching vector context
  ...
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/asm-offsets.c        |   1
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c      |  38
-rw-r--r--  arch/mips/kernel/cpu-probe.c          |  45
-rw-r--r--  arch/mips/kernel/ftrace.c             |  56
-rw-r--r--  arch/mips/kernel/irq-gic.c            |  38
-rw-r--r--  arch/mips/kernel/mcount.S             |  13
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  |  42
-rw-r--r--  arch/mips/kernel/pm-cps.c             |  10
-rw-r--r--  arch/mips/kernel/proc.c               |   2
-rw-r--r--  arch/mips/kernel/process.c            |  60
-rw-r--r--  arch/mips/kernel/ptrace.c             | 223
-rw-r--r--  arch/mips/kernel/ptrace32.c           |  10
-rw-r--r--  arch/mips/kernel/r4k_switch.S         |   9
-rw-r--r--  arch/mips/kernel/rtlx-cmp.c           |   3
-rw-r--r--  arch/mips/kernel/rtlx-mt.c            |   3
-rw-r--r--  arch/mips/kernel/scall32-o32.S        |   6
-rw-r--r--  arch/mips/kernel/scall64-64.S         |   4
-rw-r--r--  arch/mips/kernel/scall64-n32.S        |  12
-rw-r--r--  arch/mips/kernel/scall64-o32.S        |   8
-rw-r--r--  arch/mips/kernel/setup.c              |  22
-rw-r--r--  arch/mips/kernel/smp-cps.c            |  12
-rw-r--r--  arch/mips/kernel/smp-mt.c             |   2
-rw-r--r--  arch/mips/kernel/smp.c                |  26
-rw-r--r--  arch/mips/kernel/traps.c              |  61
-rw-r--r--  arch/mips/kernel/unaligned.c          |   1
25 files changed, 501 insertions, 206 deletions
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 4bb5107511e2..b1d84bd4efb3 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -234,6 +234,7 @@ void output_thread_fpu_defines(void)
thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+ OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
}
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 7faf5f2bee25..928767858b86 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -72,22 +72,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <asm/processor.h>
-/*
- * When this file is selected, we are definitely running a 64bit kernel.
- * So using the right regs define in asm/reg.h
- */
-#define WANT_COMPAT_REG_H
-
-/* These MUST be defined before elf.h gets included */
-extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
-#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
-#define ELF_CORE_COPY_TASK_REGS(_tsk, _dest) \
-({ \
- int __res = 1; \
- elf32_core_copy_regs(*(_dest), task_pt_regs(_tsk)); \
- __res; \
-})
-
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
@@ -145,28 +129,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
value->tv_usec = rem / NSEC_PER_USEC;
}
-void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
-{
- int i;
-
- for (i = 0; i < EF_R0; i++)
- grp[i] = 0;
- grp[EF_R0] = 0;
- for (i = 1; i <= 31; i++)
- grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
- grp[EF_R26] = 0;
- grp[EF_R27] = 0;
- grp[EF_LO] = (elf_greg_t) regs->lo;
- grp[EF_HI] = (elf_greg_t) regs->hi;
- grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
- grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
- grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
- grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
-#ifdef EF_UNUSED0
- grp[EF_UNUSED0] = 0;
-#endif
-}
-
MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index d74f957c561e..e34b10be782e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -54,6 +54,20 @@ static int __init dsp_disable(char *s)
__setup("nodsp", dsp_disable);
+static int mips_htw_disabled;
+
+static int __init htw_disable(char *s)
+{
+ mips_htw_disabled = 1;
+ cpu_data[0].options &= ~MIPS_CPU_HTW;
+ write_c0_pwctl(read_c0_pwctl() &
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT));
+
+ return 1;
+}
+
+__setup("nohtw", htw_disable);
+
static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -130,14 +144,13 @@ static inline int __cpu_has_fpu(void)
static inline unsigned long cpu_get_msa_id(void)
{
- unsigned long status, conf5, msa_id;
+ unsigned long status, msa_id;
status = read_c0_status();
__enable_fpu(FPU_64BIT);
- conf5 = read_c0_config5();
enable_msa();
msa_id = read_msa_ir();
- write_c0_config5(conf5);
+ disable_msa();
write_c0_status(status);
return msa_id;
}
@@ -321,6 +334,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_SEGMENTS;
if (config3 & MIPS_CONF3_MSA)
c->ases |= MIPS_ASE_MSA;
+ /* Only tested on 32-bit cores */
+ if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT))
+ c->options |= MIPS_CPU_HTW;
return config3 & MIPS_CONF_M;
}
@@ -389,6 +405,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
if (config5 & MIPS_CONF5_EVA)
c->options |= MIPS_CPU_EVA;
+ if (config5 & MIPS_CONF5_MRP)
+ c->options |= MIPS_CPU_MAAR;
return config5 & MIPS_CONF_M;
}
@@ -421,6 +439,15 @@ static void decode_configs(struct cpuinfo_mips *c)
mips_probe_watch_registers(c);
+ if (cpu_has_rixi) {
+ /* Enable the RIXI exceptions */
+ write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+ back_to_back_c0_hazard();
+ /* Verify the IEC bit is set */
+ if (read_c0_pagegrain() & PG_IEC)
+ c->options |= MIPS_CPU_RIXIEX;
+ }
+
#ifndef CONFIG_MIPS_CPS
if (cpu_has_mips_r2) {
c->core = get_ebase_cpunum();
@@ -740,6 +767,12 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+ c->cputype = CPU_LOONGSON3;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3b");
+ break;
}
set_isa(c, MIPS_CPU_ISA_III);
@@ -1187,6 +1220,12 @@ void cpu_probe(void)
if (mips_dsp_disabled)
c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+ if (mips_htw_disabled) {
+ c->options &= ~MIPS_CPU_HTW;
+ write_c0_pwctl(read_c0_pwctl() &
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT));
+ }
+
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
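Note: the hunks above wire up a new "nohtw" early parameter (mirroring the existing "nodsp") that clears MIPS_CPU_HTW and the PWCtl enable bit, so the hardware page table walker can be turned off from the kernel command line. A minimal usage sketch, assuming a U-Boot style bootloader (the bootloader commands are illustrative, not part of this patch):

    # append the flag to the kernel arguments before booting
    setenv bootargs "${bootargs} nohtw"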
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 8b6538750fe1..937c54bc8ccc 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -63,7 +63,7 @@ static inline int in_kernel_space(unsigned long ip)
((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
static unsigned int insn_jal_ftrace_caller __read_mostly;
-static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
static inline void ftrace_dyn_arch_init_insns(void)
@@ -71,10 +71,10 @@ static inline void ftrace_dyn_arch_init_insns(void)
u32 *buf;
unsigned int v1;
- /* lui v1, hi16_mcount */
+ /* la v1, _mcount */
v1 = 3;
- buf = (u32 *)&insn_lui_v1_hi16_mcount;
- UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+ buf = (u32 *)&insn_la_mcount[0];
+ UASM_i_LA(&buf, v1, MCOUNT_ADDR);
/* jal (ftrace_caller + 8), jump over the first two instructions */
buf = (u32 *)&insn_jal_ftrace_caller;
@@ -111,14 +111,47 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
+ mm_segment_t old_fs;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
- safe_store_code(new_code2, ip + 4, faulted);
+
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
+
+ ip -= 4;
+ old_fs = get_fs();
+ set_fs(get_ds());
flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
+
+ return 0;
+}
+
+static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+ mm_segment_t old_fs;
+
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip -= 4;
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
+
return 0;
}
#endif
@@ -130,13 +163,14 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
*
* move at, ra
* jal _mcount --> nop
+ * sub sp, sp, 8 --> nop (CONFIG_32BIT)
*
* 2. For modules:
*
* 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* move $12, ra_address
* jalr v1
@@ -145,7 +179,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
* 2.2 For the Other situations
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* jalr v1
* nop | move $12, ra_address | sub sp, sp, 8
@@ -184,10 +218,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned int new;
unsigned long ip = rec->ip;
- new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
- insn_lui_v1_hi16_mcount;
+ new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
+#else
+ return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+ INSN_NOP : insn_la_mcount[1]);
+#endif
}
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
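Note: on CONFIG_32BIT, UASM_i_LA() for a kernel address generally expands to a two-instruction lui/addiu pair, which is why insn_la_mcount now holds two words. ftrace_modify_code_2r() patches the second word before the first, presumably so that a concurrently executing CPU never sees a half-patched pair: while enabling, the first word is still the live "b 1f" branch and skips over the sequence until it is overwritten last. A sketch of the emitted pair, assuming an illustrative MCOUNT_ADDR of 0x80123456:

    /* insn_la_mcount[0]:  lui   v1, 0x8012        -> v1 = 0x80120000
     * insn_la_mcount[1]:  addiu v1, v1, 0x3456    -> v1 = 0x80123456
     * (the low half is < 0x8000 here, so no hi16 carry fixup is needed)
     */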
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 88e4c323382c..9e9d8b9a5b97 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -28,6 +28,18 @@ unsigned int gic_irq_flags[GIC_NUM_INTRS];
/* The index into this array is the vector # of the interrupt. */
struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS];
+struct gic_pcpu_mask {
+ DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS);
+};
+
+struct gic_pending_regs {
+ DECLARE_BITMAP(pending, GIC_NUM_INTRS);
+};
+
+struct gic_intrmask_regs {
+ DECLARE_BITMAP(intrmask, GIC_NUM_INTRS);
+};
+
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
@@ -177,7 +189,7 @@ unsigned int gic_compare_int(void)
return 0;
}
-unsigned int gic_get_int(void)
+void gic_get_int_mask(unsigned long *dst, const unsigned long *src)
{
unsigned int i;
unsigned long *pending, *intrmask, *pcpu_mask;
@@ -202,8 +214,17 @@ unsigned int gic_get_int(void)
bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
+ bitmap_and(dst, src, pending, GIC_NUM_INTRS);
+}
- return find_first_bit(pending, GIC_NUM_INTRS);
+unsigned int gic_get_int(void)
+{
+ DECLARE_BITMAP(interrupts, GIC_NUM_INTRS);
+
+ bitmap_fill(interrupts, GIC_NUM_INTRS);
+ gic_get_int_mask(interrupts, interrupts);
+
+ return find_first_bit(interrupts, GIC_NUM_INTRS);
}
static void gic_mask_irq(struct irq_data *d)
@@ -269,11 +290,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
/* Setup Intr to Pin mapping */
if (pin & GIC_MAP_TO_NMI_MSK) {
+ int i;
+
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
/* FIXME: hack to route NMI to all cpu's */
- for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+ for (i = 0; i < NR_CPUS; i += 32) {
GICWRITE(GIC_REG_ADDR(SHARED,
- GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+ GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
0xffffffff);
}
} else {
@@ -299,9 +322,10 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
/* Init Intr Masks */
GIC_CLR_INTR_MASK(intr);
+
/* Initialise per-cpu Interrupt software masks */
- if (flags & GIC_FLAG_IPI)
- set_bit(intr, pcpu_masks[cpu].pcpu_mask);
+ set_bit(intr, pcpu_masks[cpu].pcpu_mask);
+
if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
GIC_SET_INTR_MASK(intr);
if (trigtype == GIC_TRIG_EDGE)
@@ -340,8 +364,6 @@ static void __init gic_basic_init(int numintrs, int numvpes,
cpu = intrmap[i].cpunum;
if (cpu == GIC_UNUSED)
continue;
- if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
- continue;
gic_setup_intr(i,
intrmap[i].cpunum,
intrmap[i].pin + pin_offset,
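Note: gic_get_int_mask() narrows a caller-supplied bitmap to the vectors that are pending, unmasked, and routed to the current CPU; gic_get_int() above is simply the caller that passes a full mask and returns the first set bit. A minimal sketch of a caller interested in a single vector (GIC_INT_FOO is a hypothetical vector number, not from this patch):

    DECLARE_BITMAP(wanted, GIC_NUM_INTRS);
    bool foo_pending;

    bitmap_zero(wanted, GIC_NUM_INTRS);
    set_bit(GIC_INT_FOO, wanted);       /* only interested in this vector */
    gic_get_int_mask(wanted, wanted);   /* wanted &= pending & intrmask & pcpu_mask */
    foo_pending = test_bit(GIC_INT_FOO, wanted);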
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 00940d1d5c4f..5d25462de8a6 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -80,6 +80,19 @@ _mcount:
#endif
PTR_SUBU a0, ra, 8 /* arg1: self address */
+ PTR_LA t1, _stext
+ sltu t2, a0, t1 /* t2 = (a0 < _stext) */
+ PTR_LA t1, _etext
+ sltu t3, t1, a0 /* t3 = (a0 > _etext) */
+ or t1, t2, t3
+ beqz t1, ftrace_call
+ nop
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+ PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */
+#else
+ PTR_SUBU a0, a0, 12
+#endif
+
.globl ftrace_call
ftrace_call:
nop /* a placeholder for the call to a real tracing function */
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 4f2d9dece7ab..14bf74b0f51c 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1386,6 +1386,9 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
((b) == 0 || (b) == 1)
+/* P5600 */
+#define IS_BOTH_COUNTERS_P5600_EVENT(b) \
+ ((b) == 0 || (b) == 1)
/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
@@ -1420,20 +1423,23 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
/*
- * User can use 0-255 raw events, where 0-127 for the events of even
- * counters, and 128-255 for odd counters. Note that bit 7 is used to
- * indicate the parity. So, for example, when user wants to take the
- * Event Num of 15 for odd counters (by referring to the user manual),
- * then 128 needs to be added to 15 as the input for the event config,
- * i.e., 143 (0x8F) to be used.
+ * For most cores the user can use 0-255 raw events, where 0-127 for the events
+ * of even counters, and 128-255 for odd counters. Note that bit 7 is used to
+ * indicate the even/odd bank selector. So, for example, when user wants to take
+ * the Event Num of 15 for odd counters (by referring to the user manual), then
+ * 128 needs to be added to 15 as the input for the event config, i.e., 143 (0x8F)
+ * to be used.
+ *
+ * Some newer cores have even more events, in which case the user can use raw
+ * events 0-511, where 0-255 are for the events of even counters, and 256-511
+ * are for odd counters, so bit 8 is used to indicate the even/odd bank selector.
*/
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
+ /* currently most cores have 7-bit event numbers */
unsigned int raw_id = config & 0xff;
unsigned int base_id = raw_id & 0x7f;
- raw_event.event_id = base_id;
-
switch (current_cpu_type()) {
case CPU_24K:
if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
@@ -1485,6 +1491,19 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
raw_event.range = P;
#endif
break;
+ case CPU_P5600:
+ /* 8-bit event numbers */
+ raw_id = config & 0x1ff;
+ base_id = raw_id & 0xff;
+ if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
case CPU_1004K:
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1523,6 +1542,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
}
+ raw_event.event_id = base_id;
+
return &raw_event;
}
@@ -1633,6 +1654,11 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
+ case CPU_P5600:
+ mipspmu.name = "mips/P5600";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
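Note: a worked example of the raw-event encoding described in the comment above, using "Event Num 15" from a core's user manual:

    /* classic cores (7-bit event numbers): bit 7 selects the odd bank,
     *   so an odd-counter event 15 is encoded as 15 + 128 = 143 = 0x8f;
     * P5600 (8-bit event numbers): bit 8 selects the odd bank,
     *   so the same event is encoded as 15 + 256 = 271 = 0x10f.
     * With perf's raw-event syntax these would typically be requested as
     * "perf stat -e r8f ..." and "perf stat -e r10f ..." respectively.
     */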
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index c4c2069d3a20..06147179a175 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -149,8 +149,12 @@ int cps_pm_enter_state(enum cps_pm_state state)
/* Setup the VPE to run mips_cps_pm_restore when started again */
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /* Power gating relies upon CPS SMP */
+ if (!mips_cps_smp_in_use())
+ return -EINVAL;
+
core_cfg = &mips_cps_core_bootcfg[core];
- vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
+ vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
vpe_cfg->gp = (unsigned long)current_thread_info();
vpe_cfg->sp = 0;
@@ -376,6 +380,10 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
memset(relocs, 0, sizeof(relocs));
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /* Power gating relies upon CPS SMP */
+ if (!mips_cps_smp_in_use())
+ goto out_err;
+
/*
* Save CPU state. Note the non-standard calling convention
* with the return address placed in v0 to avoid clobbering
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 037a44d962f3..097fc8d14e42 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -113,6 +113,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_vz) seq_printf(m, "%s", " vz");
if (cpu_has_msa) seq_printf(m, "%s", " msa");
if (cpu_has_eva) seq_printf(m, "%s", " eva");
+ if (cpu_has_htw) seq_printf(m, "%s", " htw");
seq_printf(m, "\n");
if (cpu_has_mmips) {
@@ -123,6 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_data[n].srsets);
seq_printf(m, "kscratch registers\t: %d\n",
hweight8(cpu_data[n].kscratch_mask));
+ seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 0a1ec0f3beff..636b0745d7c7 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -21,7 +21,6 @@
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
-#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
@@ -36,6 +35,7 @@
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
+#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
@@ -66,6 +66,7 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
clear_used_math();
clear_fpu_owner();
init_dsp();
+ clear_thread_flag(TIF_USEDMSA);
clear_thread_flag(TIF_MSA_CTX_LIVE);
disable_msa();
regs->cp0_epc = pc;
@@ -141,6 +142,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDMSA);
+ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
#ifdef CONFIG_MIPS_MT_FPAFF
clear_tsk_thread_flag(p, TIF_FPUBOUND);
@@ -152,61 +155,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
-/* Fill in the fpu structure for a core dump.. */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
-{
- int i;
-
- for (i = 0; i < NUM_FPU_REGS; i++)
- memcpy(&r[i], &current->thread.fpu.fpr[i], sizeof(*r));
-
- memcpy(&r[NUM_FPU_REGS], &current->thread.fpu.fcr31,
- sizeof(current->thread.fpu.fcr31));
-
- return 1;
-}
-
-void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
-{
- int i;
-
- for (i = 0; i < EF_R0; i++)
- gp[i] = 0;
- gp[EF_R0] = 0;
- for (i = 1; i <= 31; i++)
- gp[EF_R0 + i] = regs->regs[i];
- gp[EF_R26] = 0;
- gp[EF_R27] = 0;
- gp[EF_LO] = regs->lo;
- gp[EF_HI] = regs->hi;
- gp[EF_CP0_EPC] = regs->cp0_epc;
- gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
- gp[EF_CP0_STATUS] = regs->cp0_status;
- gp[EF_CP0_CAUSE] = regs->cp0_cause;
-#ifdef EF_UNUSED0
- gp[EF_UNUSED0] = 0;
-#endif
-}
-
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
- elf_dump_regs(*regs, task_pt_regs(tsk));
- return 1;
-}
-
-int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
-{
- int i;
-
- for (i = 0; i < NUM_FPU_REGS; i++)
- memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr));
-
- memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31,
- sizeof(t->thread.fpu.fcr31));
-
- return 1;
-}
-
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index f639ccd5060c..645b3c4fcfba 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -24,7 +24,6 @@
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
-#include <linux/user.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
@@ -63,7 +62,7 @@ void ptrace_disable(struct task_struct *child)
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
-int ptrace_getregs(struct task_struct *child, __s64 __user *data)
+int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
struct pt_regs *regs;
int i;
@@ -74,13 +73,13 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data)
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
- __put_user((long)regs->regs[i], data + i);
- __put_user((long)regs->lo, data + EF_LO - EF_R0);
- __put_user((long)regs->hi, data + EF_HI - EF_R0);
- __put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
- __put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
- __put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
- __put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
+ __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
+ __put_user((long)regs->lo, (__s64 __user *)&data->lo);
+ __put_user((long)regs->hi, (__s64 __user *)&data->hi);
+ __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
+ __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
+ __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
+ __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
return 0;
}
@@ -90,7 +89,7 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data)
* the 64-bit format. On a 32-bit kernel only the lower order half
* (according to endianness) will be used.
*/
-int ptrace_setregs(struct task_struct *child, __s64 __user *data)
+int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
struct pt_regs *regs;
int i;
@@ -101,10 +100,10 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
- __get_user(regs->regs[i], data + i);
- __get_user(regs->lo, data + EF_LO - EF_R0);
- __get_user(regs->hi, data + EF_HI - EF_R0);
- __get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+ __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
+ __get_user(regs->lo, (__s64 __user *)&data->lo);
+ __get_user(regs->hi, (__s64 __user *)&data->hi);
+ __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
/* badvaddr, status, and cause may not be written. */
@@ -129,7 +128,7 @@ int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
}
__put_user(child->thread.fpu.fcr31, data + 64);
- __put_user(current_cpu_data.fpu_id, data + 65);
+ __put_user(boot_cpu_data.fpu_id, data + 65);
return 0;
}
@@ -151,6 +150,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
}
__get_user(child->thread.fpu.fcr31, data + 64);
+ child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* FIR may not be written. */
@@ -246,36 +246,160 @@ int ptrace_set_watch_regs(struct task_struct *child,
/* regset get/set implementations */
-static int gpr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+static int gpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
+ u32 uregs[ELF_NGREG] = {};
+ unsigned i;
+
+ for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
+ /* k0/k1 are copied as zero. */
+ if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
+ continue;
+
+ uregs[i] = regs->regs[i - MIPS32_EF_R0];
+ }
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- regs, 0, sizeof(*regs));
+ uregs[MIPS32_EF_LO] = regs->lo;
+ uregs[MIPS32_EF_HI] = regs->hi;
+ uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
+ uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
+ uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
+ uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
}
-static int gpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
+static int gpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
{
- struct pt_regs newregs;
- int ret;
+ struct pt_regs *regs = task_pt_regs(target);
+ u32 uregs[ELF_NGREG];
+ unsigned start, num_regs, i;
+ int err;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > ELF_NGREG)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++) {
+ /*
+ * Cast all values to signed here so that if this is a 64-bit
+ * kernel, the supplied 32-bit values will be sign extended.
+ */
+ switch (i) {
+ case MIPS32_EF_R1 ... MIPS32_EF_R25:
+ /* k0/k1 are ignored. */
+ case MIPS32_EF_R28 ... MIPS32_EF_R31:
+ regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
+ break;
+ case MIPS32_EF_LO:
+ regs->lo = (s32)uregs[i];
+ break;
+ case MIPS32_EF_HI:
+ regs->hi = (s32)uregs[i];
+ break;
+ case MIPS32_EF_CP0_EPC:
+ regs->cp0_epc = (s32)uregs[i];
+ break;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+static int gpr64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u64 uregs[ELF_NGREG] = {};
+ unsigned i;
+
+ for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
+ /* k0/k1 are copied as zero. */
+ if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
+ continue;
+
+ uregs[i] = regs->regs[i - MIPS64_EF_R0];
+ }
+
+ uregs[MIPS64_EF_LO] = regs->lo;
+ uregs[MIPS64_EF_HI] = regs->hi;
+ uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
+ uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
+ uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
+ uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
+}
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &newregs,
- 0, sizeof(newregs));
- if (ret)
- return ret;
+static int gpr64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u64 uregs[ELF_NGREG];
+ unsigned start, num_regs, i;
+ int err;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
- *task_pt_regs(target) = newregs;
+ if (start + num_regs > ELF_NGREG)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++) {
+ switch (i) {
+ case MIPS64_EF_R1 ... MIPS64_EF_R25:
+ /* k0/k1 are ignored. */
+ case MIPS64_EF_R28 ... MIPS64_EF_R31:
+ regs->regs[i - MIPS64_EF_R0] = uregs[i];
+ break;
+ case MIPS64_EF_LO:
+ regs->lo = uregs[i];
+ break;
+ case MIPS64_EF_HI:
+ regs->hi = uregs[i];
+ break;
+ case MIPS64_EF_CP0_EPC:
+ regs->cp0_epc = uregs[i];
+ break;
+ }
+ }
return 0;
}
+#endif /* CONFIG_64BIT */
+
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -337,14 +461,16 @@ enum mips_regset {
REGSET_FPR,
};
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
static const struct user_regset mips_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned int),
.align = sizeof(unsigned int),
- .get = gpr_get,
- .set = gpr_set,
+ .get = gpr32_get,
+ .set = gpr32_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
@@ -364,14 +490,18 @@ static const struct user_regset_view user_mips_view = {
.n = ARRAY_SIZE(mips_regsets),
};
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
static const struct user_regset mips64_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
- .get = gpr_get,
- .set = gpr_set,
+ .get = gpr64_get,
+ .set = gpr64_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
@@ -384,25 +514,26 @@ static const struct user_regset mips64_regsets[] = {
};
static const struct user_regset_view user_mips64_view = {
- .name = "mips",
+ .name = "mips64",
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = mips64_regsets,
- .n = ARRAY_SIZE(mips_regsets),
+ .n = ARRAY_SIZE(mips64_regsets),
};
+#endif /* CONFIG_64BIT */
+
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
return &user_mips_view;
-#endif
-
+#else
#ifdef CONFIG_MIPS32_O32
- if (test_thread_flag(TIF_32BIT_REGS))
- return &user_mips_view;
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
+ return &user_mips_view;
#endif
-
return &user_mips64_view;
+#endif
}
long arch_ptrace(struct task_struct *child, long request,
@@ -480,7 +611,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
case FPC_EIR:
/* implementation / version register */
- tmp = current_cpu_data.fpu_id;
+ tmp = boot_cpu_data.fpu_id;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -565,7 +696,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
- child->thread.fpu.fcr31 = data;
+ child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
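Note: the (s32) casts in gpr32_set() are what make 32-bit register images land correctly in a 64-bit kernel's pt_regs; a two-line illustration (the value is illustrative):

    u32 image = 0x80001234;   /* 32-bit register value supplied by the debugger */
    unsigned long r;

    r = image;        /* zero-extends:  0x0000000080001234 (wrong for o32)    */
    r = (s32)image;   /* sign-extends:  0xffffffff80001234 (what o32 expects) */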
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index b40c3ca60ee5..283b5a1967d1 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -22,7 +22,6 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
-#include <linux/user.h>
#include <linux/security.h>
#include <asm/cpu.h>
@@ -32,6 +31,7 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
@@ -129,7 +129,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
case FPC_EIR:
/* implementation / version register */
- tmp = current_cpu_data.fpu_id;
+ tmp = boot_cpu_data.fpu_id;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -256,11 +256,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__s64 __user *) (__u64) data);
+ ret = ptrace_getregs(child,
+ (struct user_pt_regs __user *) (__u64) data);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__s64 __user *) (__u64) data);
+ ret = ptrace_setregs(child,
+ (struct user_pt_regs __user *) (__u64) data);
break;
case PTRACE_GETFPREGS:
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 81ca3f70fe29..4c4ec1812420 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -64,8 +64,10 @@
/* Check whether we're saving scalar or vector context. */
bgtz a3, 1f
- /* Save 128b MSA vector context. */
+ /* Save 128b MSA vector context + scalar FP control & status. */
+ cfc1 t1, fcr31
msa_save_all a0
+ sw t1, THREAD_FCR31(a0)
b 2f
1: /* Save 32b/64b scalar FP context. */
@@ -142,6 +144,11 @@ LEAF(_restore_msa)
jr ra
END(_restore_msa)
+LEAF(_init_msa_upper)
+ msa_init_all_upper
+ jr ra
+ END(_init_msa_upper)
+
#endif
/*
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
index 758fb3cd2326..d26dcc4b46e7 100644
--- a/arch/mips/kernel/rtlx-cmp.c
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -77,6 +77,9 @@ int __init rtlx_module_init(void)
dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
"%s%d", RTLX_MODULE_NAME, i);
if (IS_ERR(dev)) {
+ while (i--)
+ device_destroy(mt_class, MKDEV(major, i));
+
err = PTR_ERR(dev);
goto out_chrdev;
}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 5a66b975989e..cb95470e2e69 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -103,6 +103,9 @@ int __init rtlx_module_init(void)
dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
"%s%d", RTLX_MODULE_NAME, i);
if (IS_ERR(dev)) {
+ while (i--)
+ device_destroy(mt_class, MKDEV(major, i));
+
err = PTR_ERR(dev);
goto out_chrdev;
}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index ab02d14f1b5c..f93b4cbec739 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -67,8 +67,6 @@ NESTED(handle_sys, PT_SIZE, sp)
/*
* Ok, copy the args from the luser stack to the kernel stack.
- * t3 is the precomputed number of instruction bytes needed to
- * load or store arguments 6-8.
*/
.set push
@@ -495,8 +493,8 @@ EXPORT(sys_call_table)
PTR sys_tgkill
PTR sys_utimes
PTR sys_mbind
- PTR sys_ni_syscall /* sys_get_mempolicy */
- PTR sys_ni_syscall /* 4270 sys_set_mempolicy */
+ PTR sys_get_mempolicy
+ PTR sys_set_mempolicy /* 4270 */
PTR sys_mq_open
PTR sys_mq_unlink
PTR sys_mq_timedsend
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 010dccf128ec..03ebd9979ad2 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -347,8 +347,8 @@ EXPORT(sys_call_table)
PTR sys_tgkill /* 5225 */
PTR sys_utimes
PTR sys_mbind
- PTR sys_ni_syscall /* sys_get_mempolicy */
- PTR sys_ni_syscall /* sys_set_mempolicy */
+ PTR sys_get_mempolicy
+ PTR sys_set_mempolicy
PTR sys_mq_open /* 5230 */
PTR sys_mq_unlink
PTR sys_mq_timedsend
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index c3b3b6525df5..ebc9228e2e15 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -162,7 +162,7 @@ EXPORT(sysn32_call_table)
PTR sys_getpeername
PTR sys_socketpair
PTR compat_sys_setsockopt
- PTR sys_getsockopt
+ PTR compat_sys_getsockopt
PTR __sys_clone /* 6055 */
PTR __sys_fork
PTR compat_sys_execve
@@ -339,9 +339,9 @@ EXPORT(sysn32_call_table)
PTR compat_sys_clock_nanosleep
PTR sys_tgkill
PTR compat_sys_utimes /* 6230 */
- PTR sys_ni_syscall /* sys_mbind */
- PTR sys_ni_syscall /* sys_get_mempolicy */
- PTR sys_ni_syscall /* sys_set_mempolicy */
+ PTR compat_sys_mbind
+ PTR compat_sys_get_mempolicy
+ PTR compat_sys_set_mempolicy
PTR compat_sys_mq_open
PTR sys_mq_unlink /* 6235 */
PTR compat_sys_mq_timedsend
@@ -358,7 +358,7 @@ EXPORT(sysn32_call_table)
PTR sys_inotify_init
PTR sys_inotify_add_watch
PTR sys_inotify_rm_watch
- PTR sys_migrate_pages /* 6250 */
+ PTR compat_sys_migrate_pages /* 6250 */
PTR sys_openat
PTR sys_mkdirat
PTR sys_mknodat
@@ -379,7 +379,7 @@ EXPORT(sysn32_call_table)
PTR sys_sync_file_range
PTR sys_tee
PTR compat_sys_vmsplice /* 6270 */
- PTR sys_move_pages
+ PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
PTR compat_sys_kexec_load
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index bb1550b1f501..13b964fddc4a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -473,9 +473,9 @@ EXPORT(sys32_call_table)
PTR compat_sys_clock_nanosleep /* 4265 */
PTR sys_tgkill
PTR compat_sys_utimes
- PTR sys_ni_syscall /* sys_mbind */
- PTR sys_ni_syscall /* sys_get_mempolicy */
- PTR sys_ni_syscall /* 4270 sys_set_mempolicy */
+ PTR compat_sys_mbind
+ PTR compat_sys_get_mempolicy
+ PTR compat_sys_set_mempolicy /* 4270 */
PTR compat_sys_mq_open
PTR sys_mq_unlink
PTR compat_sys_mq_timedsend
@@ -492,7 +492,7 @@ EXPORT(sys32_call_table)
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
PTR sys_inotify_rm_watch
- PTR sys_migrate_pages
+ PTR compat_sys_migrate_pages
PTR compat_sys_openat
PTR sys_mkdirat
PTR sys_mknodat /* 4290 */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a842154d57dc..7c1fe2b42d40 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -282,7 +282,7 @@ static unsigned long __init init_initrd(void)
* Initialize the bootmem allocator. It also setup initrd related data
* if needed.
*/
-#ifdef CONFIG_SGI_IP27
+#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
static void __init bootmem_init(void)
{
@@ -729,6 +729,25 @@ static void __init resource_init(void)
}
}
+#ifdef CONFIG_SMP
+static void __init prefill_possible_map(void)
+{
+ int i, possible = num_possible_cpus();
+
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
+
+ for (i = 0; i < possible; i++)
+ set_cpu_possible(i, true);
+ for (; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
+
+ nr_cpu_ids = possible;
+}
+#else
+static inline void prefill_possible_map(void) {}
+#endif
+
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
@@ -752,6 +771,7 @@ void __init setup_arch(char **cmdline_p)
resource_init();
plat_smp_setup();
+ prefill_possible_map();
cpu_cache_init();
}
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 949f2c6827a0..e6e16a1d4add 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -14,13 +14,14 @@
#include <linux/smp.h>
#include <linux/types.h>
-#include <asm/cacheflush.h>
+#include <asm/bcache.h>
#include <asm/gic.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
+#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
@@ -132,8 +133,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
entry_code = (u32 *)&mips_cps_core_entry;
UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
uasm_i_addiu(&entry_code, 16, 0, cca);
- dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
- (void *)entry_code - (void *)&mips_cps_core_entry);
+ blast_dcache_range((unsigned long)&mips_cps_core_entry,
+ (unsigned long)entry_code);
+ bc_wback_inv((unsigned long)&mips_cps_core_entry,
+ (void *)entry_code - (void *)&mips_cps_core_entry);
+ __sync();
/* Allocate core boot configuration structs */
mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
@@ -360,7 +364,7 @@ void play_dead(void)
static void wait_for_sibling_halt(void *ptr_cpu)
{
unsigned cpu = (unsigned)ptr_cpu;
- unsigned vpe_id = cpu_data[cpu].vpe_id;
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned halted;
unsigned long flags;
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3babf6e4f894..21f23add04f4 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -288,6 +288,7 @@ struct plat_smp_ops vsmp_smp_ops = {
.prepare_cpus = vsmp_prepare_cpus,
};
+#ifdef CONFIG_PROC_FS
static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
unsigned long action_unused, void *data)
{
@@ -309,3 +310,4 @@ static int __init proc_cpuinfo_notifier_init(void)
}
subsys_initcall(proc_cpuinfo_notifier_init);
+#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9bad52ede903..c94c4e92e17d 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -59,9 +59,16 @@ EXPORT_SYMBOL(smp_num_siblings);
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
+/* representing the core map of multi-core chips of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
+/* representing cpus for which core maps can be computed */
+static cpumask_t cpu_core_setup_map;
+
cpumask_t cpu_coherent_mask;
static inline void set_cpu_sibling_map(int cpu)
@@ -72,7 +79,8 @@ static inline void set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (cpu_data[cpu].core == cpu_data[i].core) {
+ if (cpu_data[cpu].package == cpu_data[i].package &&
+ cpu_data[cpu].core == cpu_data[i].core) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
}
@@ -81,6 +89,20 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(cpu, cpu_sibling_map[cpu]);
}
+static inline void set_cpu_core_map(int cpu)
+{
+ int i;
+
+ cpu_set(cpu, cpu_core_setup_map);
+
+ for_each_cpu_mask(i, cpu_core_setup_map) {
+ if (cpu_data[cpu].package == cpu_data[i].package) {
+ cpu_set(i, cpu_core_map[cpu]);
+ cpu_set(cpu, cpu_core_map[i]);
+ }
+ }
+}
+
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
@@ -122,6 +144,7 @@ asmlinkage void start_secondary(void)
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
cpu_set(cpu, cpu_callin_map);
@@ -175,6 +198,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
current_thread_info()->cpu = 0;
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
+ set_cpu_core_map(0);
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 51706d6dd5b0..22b19c275044 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -90,6 +90,7 @@ extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
+extern void tlb_do_page_fault_0(void);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
@@ -1088,13 +1089,19 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
static int enable_restore_fp_context(int msa)
{
- int err, was_fpu_owner;
+ int err, was_fpu_owner, prior_msa;
if (!used_math()) {
/* First time FP context user. */
+ preempt_disable();
err = init_fpu();
- if (msa && !err)
+ if (msa && !err) {
enable_msa();
+ _init_msa_upper();
+ set_thread_flag(TIF_USEDMSA);
+ set_thread_flag(TIF_MSA_CTX_LIVE);
+ }
+ preempt_enable();
if (!err)
set_used_math();
return err;
@@ -1134,10 +1141,11 @@ static int enable_restore_fp_context(int msa)
* This task is using or has previously used MSA. Thus we require
* that Status.FR == 1.
*/
+ preempt_disable();
was_fpu_owner = is_fpu_owner();
- err = own_fpu(0);
+ err = own_fpu_inatomic(0);
if (err)
- return err;
+ goto out;
enable_msa();
write_msa_csr(current->thread.fpu.msacsr);
@@ -1146,13 +1154,42 @@ static int enable_restore_fp_context(int msa)
/*
* If this is the first time that the task is using MSA and it has
* previously used scalar FP in this time slice then we already have
- * FP context which we shouldn't clobber.
+ * FP context which we shouldn't clobber. We do however need to clear
+ * the upper 64b of each vector register so that this task has no
+ * opportunity to see data left behind by another.
*/
- if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
- return 0;
+ prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+ if (!prior_msa && was_fpu_owner) {
+ _init_msa_upper();
+
+ goto out;
+ }
+
+ if (!prior_msa) {
+ /*
+ * Restore the least significant 64b of each vector register
+ * from the existing scalar FP context.
+ */
+ _restore_fp(current);
+
+ /*
+ * The task has not formerly used MSA, so clear the upper 64b
+ * of each vector register such that it cannot see data left
+ * behind by another task.
+ */
+ _init_msa_upper();
+ } else {
+ /* We need to restore the vector context. */
+ restore_msa(current);
+
+ /* Restore the scalar FP control & status register */
+ if (!was_fpu_owner)
+ asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+ }
+
+out:
+ preempt_enable();
- /* We need to restore the vector context. */
- restore_msa(current);
return 0;
}
@@ -2114,6 +2151,12 @@ void __init trap_init(void)
set_except_vector(15, handle_fpe);
set_except_vector(16, handle_ftlb);
+
+ if (cpu_has_rixiex) {
+ set_except_vector(19, tlb_do_page_fault_0);
+ set_except_vector(20, tlb_do_page_fault_0);
+ }
+
set_except_vector(21, handle_msa);
set_except_vector(22, handle_mdmx);
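Note: with the hunks above, the MSA branch of enable_restore_fp_context() distinguishes four states; a summary of the cases as implemented:

    /* prior_msa  was_fpu_owner   action taken
     *     0           1          _init_msa_upper() only (scalar FP regs are live)
     *     0           0          _restore_fp() then _init_msa_upper()
     *     1           1          restore_msa()
     *     1           0          restore_msa(), then ctc1 to restore fcr31
     */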
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 2b3517214d6d..e11906dff885 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -690,7 +690,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
case sdc1_op:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
- BUG_ON(!is_fpu_owner());
lose_fpu(1); /* Save FPU state for the emulator. */
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,