author    Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 16:55:55 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 16:55:55 -0700
commit    807249d3ada1ff28a47c4054ca4edd479421b671 (patch)
tree      a9051ff7b4c31670ac89bb037c90d5baf90d449d /arch/mips/kernel
parent    ff474e8ca8547d09cb82ebab56d4c96f9eea01ce (diff)
parent    2db97045aa40da4312f7321845bc52b136c8603a (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
 "This is the main pull request for 4.3 for MIPS. Here's the summary:

  Three fixes that didn't make 4.2-stable:

   - a -Os build might compile the kernel using the MIPS16 instruction
     set, but the R2-optimized inline functions in <uapi/asm/swab.h>
     are implemented using 32-bit wide instructions, which is invalid.

   - a build error in pgtable-bits.h for a particular kernel
     configuration.

   - accessing registers of the CM GCR might have been compiled to use
     64-bit accesses, but these registers are only 32 bits wide.

  And also a few new bits:

   - move the ATH79 GPIO driver to drivers/gpio
   - the definition of IRQCHIP_DECLARE has moved to linux/irqchip.h;
     change ATH79 accordingly
   - fix definition of pgprot_writecombine
   - add an implementation of dma_map_ops.mmap
   - fix alignment of quiet build output for vmlinuz link
   - BCM47xx: use kmemdup rather than duplicating its implementation
   - Netlogic: fix 0x0x prefixes of constants
   - merge Bjorn Helgaas' series to remove most of the weak keywords
     from function declarations
   - CP0 and CP1 registers are best treated as unsigned values to
     avoid large values becoming negative
   - improve support for the MIPS GIC timer
   - enable the common clock framework for Malta and SEAD3
   - a number of improvements and fixes to dump_tlb()
   - document the MIPS TLB dump functionality in Magic SysRq
   - Cavium Octeon CN68XX improvements
   - NetLogic improvements
   - irq: use the access helper irq_data_get_affinity_mask
   - handle MSA unaligned accesses
   - a number of R6-related math-emu fixes
   - support for I6400
   - improvements to MSA support
   - add uprobes support
   - move from the deprecated __initcall to arch_initcall
   - remove finish_arch_switch()
   - IRQ cleanups by Thomas Gleixner
   - migrate to the new 'set-state' clockevent interface (sketched
     below)
   - random small cleanups"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (148 commits)
  MIPS: UAPI: Fix unrecognized opcode WSBH/DSBH/DSHD when using MIPS16.
  MIPS: Fix alignment of quiet build output for vmlinuz link
  MIPS: math-emu: Remove unused handle_dsemul function declaration
  MIPS: math-emu: Add support for the MIPS R6 MAX{, A} FPU instruction
  MIPS: math-emu: Add support for the MIPS R6 MIN{, A} FPU instruction
  MIPS: math-emu: Add support for the MIPS R6 CLASS FPU instruction
  MIPS: math-emu: Add support for the MIPS R6 RINT FPU instruction
  MIPS: math-emu: Add support for the MIPS R6 MSUBF FPU instruction
  MIPS: math-emu: Add support for the MIPS R6 MADDF FPU instruction
  MIPS: math-emu: Add support for the MIPS R6 SELNEZ FPU instruction
  MIPS: math-emu: Add support for the MIPS R6 SELEQZ FPU instruction
  MIPS: math-emu: Add support for the CMP.condn.fmt R6 instruction
  MIPS: inst.h: Add new MIPS R6 FPU opcodes
  MIPS: Octeon: Fix management port MII address on Kontron S1901
  MIPS: BCM47xx: Use kmemdup rather than duplicating its implementation
  STAGING: Octeon: Use common helpers for determining interface and port
  MIPS: Octeon: Support interfaces 4 and 5
  MIPS: Octeon: Set up 1:1 mapping between CN68XX PKO queues and ports
  MIPS: Octeon: Initialize CN68XX PKO
  STAGING: Octeon: Support CN68XX style WQE
  ...
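The 'set-state' migration called out above is the recurring pattern in the cevt-* changes below: the single set_mode(enum clock_event_mode) callback is dropped in favour of one int-returning callback per state. A minimal sketch of the resulting driver shape, assuming the 4.3-era <linux/clockchips.h> API (the my_timer_* names are hypothetical):

#include <linux/clockchips.h>

/* Stop the (hypothetical) timer hardware; also reused for oneshot setup,
 * since the timer stays stopped until set_next_event() arms it. */
static int my_timer_shutdown(struct clock_event_device *evt)
{
	/* ... stop hardware ... */
	return 0;
}

/* Program HZ-rate periodic interrupts. */
static int my_timer_set_periodic(struct clock_event_device *evt)
{
	/* ... program hardware for a HZ-rate reload ... */
	return 0;
}

static struct clock_event_device my_clockevent = {
	.name			= "my-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= my_timer_shutdown,
	.set_state_periodic	= my_timer_set_periodic,
	.set_state_oneshot	= my_timer_shutdown,
	.tick_resume		= my_timer_shutdown,
};

Callbacks a device doesn't need may simply be left NULL, which is why cevt-r4k.c below can delete its empty mips_set_clock_mode() outright.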
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile                 1
-rw-r--r--  arch/mips/kernel/asm-offsets.c           12
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c          44
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c           37
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c          57
-rw-r--r--  arch/mips/kernel/cevt-r4k.c              18
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c           45
-rw-r--r--  arch/mips/kernel/cevt-txx9.c             81
-rw-r--r--  arch/mips/kernel/cps-vec.S                4
-rw-r--r--  arch/mips/kernel/cpu-probe.c             56
-rw-r--r--  arch/mips/kernel/idle.c                   1
-rw-r--r--  arch/mips/kernel/mips-cm.c              257
-rw-r--r--  arch/mips/kernel/mips-cpc.c              11
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c      6
-rw-r--r--  arch/mips/kernel/pm-cps.c                 2
-rw-r--r--  arch/mips/kernel/ptrace.c                88
-rw-r--r--  arch/mips/kernel/r4k_fpu.S              436
-rw-r--r--  arch/mips/kernel/r4k_switch.S            41
-rw-r--r--  arch/mips/kernel/signal-common.h          9
-rw-r--r--  arch/mips/kernel/signal.c               433
-rw-r--r--  arch/mips/kernel/signal32.c             207
-rw-r--r--  arch/mips/kernel/signal_n32.c             6
-rw-r--r--  arch/mips/kernel/spram.c                  1
-rw-r--r--  arch/mips/kernel/sysrq.c                 14
-rw-r--r--  arch/mips/kernel/traps.c                 57
-rw-r--r--  arch/mips/kernel/unaligned.c             74
-rw-r--r--  arch/mips/kernel/uprobes.c              341
-rw-r--r--  arch/mips/kernel/vpe.c                    7
28 files changed, 1652 insertions, 694 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 3156c8d253c1..d982be1ea1c3 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_MIPS_CM) += mips-cm.o
obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 072fab13645d..154e2039ea5e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -128,6 +128,7 @@ void output_thread_defines(void)
thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
+ OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
BLANK();
}
@@ -245,17 +246,6 @@ void output_sc_defines(void)
}
#endif
-#ifdef CONFIG_MIPS32_COMPAT
-void output_sc32_defines(void)
-{
- COMMENT("Linux 32-bit sigcontext offsets.");
- OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
- OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
- OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
- BLANK();
-}
-#endif
-
void output_signal_defined(void)
{
COMMENT("Linux signal numbers.");
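For readers unfamiliar with asm-offsets.c: the OFFSET()/COMMENT() macros above don't run at kernel runtime. They expand to magic assembly comments that kbuild scrapes out of the compiled asm-offsets.s to generate asm-offsets.h, which is how assembly such as r4k_switch.S gets constants like THREAD_TRAPNO. A sketch paraphrased from include/linux/kbuild.h (not part of this patch):

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/* OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr) therefore emits a
 * line like "->THREAD_TRAPNO <offset> thread.trap_nr" into the generated
 * assembly, which becomes "#define THREAD_TRAPNO <offset>" in the header. */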
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 7976457184b1..940ac00e9129 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -40,8 +40,8 @@
* The general purpose timer ticks at 1MHz independent if
* the rest of the system
*/
-static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -49,24 +49,22 @@ static void sibyte_set_mode(enum clock_event_mode mode,
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- __raw_writeq(0, cfg);
- __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
- __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
- cfg);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* Stop the timer until we actually program a shot */
- case CLOCK_EVT_MODE_SHUTDOWN:
- __raw_writeq(0, cfg);
- break;
-
- case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
- case CLOCK_EVT_MODE_RESUME:
- ;
- }
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+ return 0;
+}
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+ return 0;
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -91,7 +89,7 @@ static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
void __iomem *cfg;
unsigned long tmode;
- if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ if (clockevent_state_periodic(cd))
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
@@ -130,7 +128,9 @@ void sb1480_clockevent_init(void)
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
- cd->set_mode = sibyte_set_mode;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
clockevents_register_device(cd);
bcm1480_mask_irq(cpu, irq);
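The interrupt handler now uses clockevent_state_periodic() instead of peeking at the removed cd->mode field. For reference, the accessor is a trivial inline in <linux/clockchips.h>; sketched here from the 4.3-era API, so treat the field name as an assumption:

static inline bool clockevent_state_periodic(struct clock_event_device *dev)
{
	return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
}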
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index ff1f01b72270..77a5ddf53f57 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -59,27 +59,32 @@ static int ds1287_set_next_event(unsigned long delta,
return -EINVAL;
}
-static void ds1287_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int ds1287_shutdown(struct clock_event_device *evt)
{
u8 val;
spin_lock(&rtc_lock);
val = CMOS_READ(RTC_REG_B);
+ val &= ~RTC_PIE;
+ CMOS_WRITE(val, RTC_REG_B);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- val |= RTC_PIE;
- break;
- default:
- val &= ~RTC_PIE;
- break;
- }
+ spin_unlock(&rtc_lock);
+ return 0;
+}
+static int ds1287_set_periodic(struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+ val |= RTC_PIE;
CMOS_WRITE(val, RTC_REG_B);
spin_unlock(&rtc_lock);
+ return 0;
}
static void ds1287_event_handler(struct clock_event_device *dev)
@@ -87,11 +92,13 @@ static void ds1287_event_handler(struct clock_event_device *dev)
}
static struct clock_event_device ds1287_clockevent = {
- .name = "ds1287",
- .features = CLOCK_EVT_FEAT_PERIODIC,
- .set_next_event = ds1287_set_next_event,
- .set_mode = ds1287_set_mode,
- .event_handler = ds1287_event_handler,
+ .name = "ds1287",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = ds1287_set_next_event,
+ .set_state_shutdown = ds1287_shutdown,
+ .set_state_periodic = ds1287_set_periodic,
+ .tick_resume = ds1287_shutdown,
+ .event_handler = ds1287_event_handler,
};
static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index f069460751ab..66040051151d 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -64,8 +64,7 @@ static int gt641xx_timer0_set_next_event(unsigned long delta,
return 0;
}
-static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
{
u32 ctrl;
@@ -73,21 +72,39 @@ static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- ctrl |= GT_TC_CONTROL_ENTC0_MSK;
- break;
- default:
- break;
- }
+static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
}
static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
@@ -95,12 +112,16 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
}
static struct clock_event_device gt641xx_timer0_clockevent = {
- .name = "gt641xx-timer0",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .irq = GT641XX_TIMER0_IRQ,
- .set_next_event = gt641xx_timer0_set_next_event,
- .set_mode = gt641xx_timer0_set_mode,
- .event_handler = gt641xx_timer0_event_handler,
+ .name = "gt641xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .irq = GT641XX_TIMER0_IRQ,
+ .set_next_event = gt641xx_timer0_set_next_event,
+ .set_state_shutdown = gt641xx_timer0_shutdown,
+ .set_state_periodic = gt641xx_timer0_set_periodic,
+ .set_state_oneshot = gt641xx_timer0_set_oneshot,
+ .tick_resume = gt641xx_timer0_shutdown,
+ .event_handler = gt641xx_timer0_event_handler,
};
static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index d70c4d893219..8dfe6a6e1480 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -28,12 +28,6 @@ static int mips_next_event(unsigned long delta,
return res;
}
-void mips_set_clock_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- /* Nothing to do ... */
-}
-
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
@@ -174,6 +168,11 @@ int c0_compare_int_usable(void)
return 1;
}
+unsigned int __weak get_c0_compare_int(void)
+{
+ return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+}
+
int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -189,11 +188,9 @@ int r4k_clockevent_init(void)
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
- * interrupt number of it's liking.
+ * interrupt number of its liking.
*/
- irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
- if (get_c0_compare_int)
- irq = get_c0_compare_int();
+ irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
@@ -212,7 +209,6 @@ int r4k_clockevent_init(void)
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
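The new get_c0_compare_int() above is a __weak default: a platform that needs a different timer interrupt overrides it simply by defining a strong symbol of the same name, which is why the old "if (get_c0_compare_int)" presence check could be dropped. A standalone illustration of the linker behaviour (the values are made up):

#include <stdio.h>

/* Generic default, standing in for MIPS_CPU_IRQ_BASE + cp0_compare_irq. */
__attribute__((weak)) unsigned int get_c0_compare_int(void)
{
	return 7;
}

int main(void)
{
	/* Resolves to the weak default above unless another object file in
	 * the link defines a strong get_c0_compare_int(). */
	printf("timer irq = %u\n", get_c0_compare_int());
	return 0;
}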
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 5ea6d6b1de15..3d860efd63b9 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -38,8 +38,20 @@
* The general purpose timer ticks at 1MHz independent if
* the rest of the system
*/
-static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+
+ return 0;
+}
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -47,24 +59,11 @@ static void sibyte_set_mode(enum clock_event_mode mode,
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- __raw_writeq(0, cfg);
- __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
- __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
- cfg);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* Stop the timer until we actually program a shot */
- case CLOCK_EVT_MODE_SHUTDOWN:
- __raw_writeq(0, cfg);
- break;
-
- case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
- case CLOCK_EVT_MODE_RESUME:
- ;
- }
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+
+ return 0;
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -89,7 +88,7 @@ static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
void __iomem *cfg;
unsigned long tmode;
- if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ if (clockevent_state_periodic(cd))
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
@@ -129,7 +128,9 @@ void sb1250_clockevent_init(void)
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
- cd->set_mode = sibyte_set_mode;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
clockevents_register_device(cd);
sb1250_mask_irq(cpu, irq);
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 723932441ecc..537eefdf838f 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -85,36 +85,54 @@ static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
__raw_writel(0, &tmrptr->tisr);
}
-static void txx9tmr_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
- &tmrptr->itmr);
- /* start timer */
- __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
- evt->shift,
- &tmrptr->cpra);
- __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- __raw_writel(0, &tmrptr->itmr);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
- break;
- case CLOCK_EVT_MODE_RESUME:
- __raw_writel(TIMER_CCD, &tmrptr->ccdr);
- __raw_writel(0, &tmrptr->itmr);
- break;
- }
+
+ __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ /* start timer */
+ __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
+ &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_tick_resume(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
}
static int txx9tmr_set_next_event(unsigned long delta,
@@ -133,12 +151,15 @@ static int txx9tmr_set_next_event(unsigned long delta,
static struct txx9_clock_event_device txx9_clock_event_device = {
.cd = {
- .name = "TXx9",
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_mode = txx9tmr_set_mode,
- .set_next_event = txx9tmr_set_next_event,
+ .name = "TXx9",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_state_shutdown = txx9tmr_set_state_shutdown,
+ .set_state_periodic = txx9tmr_set_state_periodic,
+ .set_state_oneshot = txx9tmr_set_state_oneshot,
+ .tick_resume = txx9tmr_tick_resume,
+ .set_next_event = txx9tmr_set_next_event,
},
};
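The periodic reload written to cpra above, ((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift, is the standard clockevents nanoseconds-to-cycles conversion: mult and shift are precomputed so that cycles ≈ ns * freq / 10^9 without a division. A standalone worked example with made-up numbers (1 MHz timer, HZ=100), not taken from TXx9 hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* mult = (freq << shift) / NSEC_PER_SEC = (1e6 << 20) / 1e9 ~= 1049 */
	uint32_t mult = 1049, shift = 20;
	uint64_t ns_per_tick = 1000000000ull / 100;	/* 10 ms per tick */
	uint64_t cycles = (ns_per_tick * mult) >> shift;

	/* Prints 10004, i.e. ~10000 timer cycles per 10 ms at 1 MHz. */
	printf("%llu cycles per tick\n", (unsigned long long)cycles);
	return 0;
}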
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 1b6ca634e646..9f71c06aebf6 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -152,7 +152,7 @@ dcache_done:
/* Enter the coherent domain */
li t0, 0xff
- PTR_S t0, GCR_CL_COHERENCE_OFS(v1)
+ sw t0, GCR_CL_COHERENCE_OFS(v1)
ehb
/* Jump to kseg0 */
@@ -302,7 +302,7 @@ LEAF(mips_cps_boot_vpes)
PTR_L t0, 0(t0)
/* Calculate a pointer to this cores struct core_boot_config */
- PTR_L t0, GCR_CL_ID_OFS(t0)
+ lw t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
PTR_LA t1, mips_cps_core_bootcfg
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index dbe0792fc9c1..571a8e6ea5bd 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -32,6 +32,9 @@
#include <asm/spram.h>
#include <asm/uaccess.h>
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+
/*
* Get the FPU Implementation/Revision.
*/
@@ -188,7 +191,7 @@ __setup("nohtw", htw_disable);
static int mips_ftlb_disabled;
static int mips_has_ftlb_configured;
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
static int __init ftlb_disable(char *s)
{
@@ -202,7 +205,10 @@ static int __init ftlb_disable(char *s)
return 1;
/* Disable it in the boot cpu */
- set_ftlb_enable(&cpu_data[0], 0);
+ if (set_ftlb_enable(&cpu_data[0], 0)) {
+ pr_warn("Can't turn FTLB off\n");
+ return 1;
+ }
back_to_back_c0_hazard();
@@ -364,30 +370,41 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
return 3;
}
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
{
- unsigned int config6;
+ unsigned int config;
/* It's implementation dependent how the FTLB can be enabled */
switch (c->cputype) {
case CPU_PROAPTIV:
case CPU_P5600:
/* proAptiv & related cores use Config6 to enable the FTLB */
- config6 = read_c0_config6();
+ config = read_c0_config6();
/* Clear the old probability value */
- config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+ config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
if (enable)
/* Enable FTLB */
- write_c0_config6(config6 |
+ write_c0_config6(config |
(calculate_ftlb_probability(c)
<< MIPS_CONF6_FTLBP_SHIFT)
| MIPS_CONF6_FTLBEN);
else
/* Disable FTLB */
- write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
- back_to_back_c0_hazard();
+ write_c0_config6(config & ~MIPS_CONF6_FTLBEN);
break;
+ case CPU_I6400:
+ /* I6400 & related cores use Config7 to configure FTLB */
+ config = read_c0_config7();
+ /* Clear the old probability value */
+ config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
+ write_c0_config7(config | (calculate_ftlb_probability(c)
+ << MIPS_CONF7_FTLBP_SHIFT));
+ break;
+ default:
+ return 1;
}
+
+ return 0;
}
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -524,6 +541,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
}
if (config3 & MIPS_CONF3_CDMM)
c->options |= MIPS_CPU_CDMM;
+ if (config3 & MIPS_CONF3_SP)
+ c->options |= MIPS_CPU_SP;
return config3 & MIPS_CONF_M;
}
@@ -540,7 +559,16 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
if (cpu_has_tlb) {
if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
c->options |= MIPS_CPU_TLBINV;
- mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ /*
+ * This is a bit ugly. R6 has dropped that field from
+ * config4 and the only valid configuration is VTLB+FTLB so
+ * set a good value for mmuextdef for that case.
+ */
+ if (cpu_has_mips_r6)
+ mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
+ else
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+
switch (mmuextdef) {
case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
@@ -1121,6 +1149,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_P5600;
__cpu_name[cpu] = "MIPS P5600";
break;
+ case PRID_IMP_I6400:
+ c->cputype = CPU_I6400;
+ __cpu_name[cpu] = "MIPS I6400";
+ break;
case PRID_IMP_M5150:
c->cputype = CPU_M5150;
__cpu_name[cpu] = "MIPS M5150";
@@ -1492,10 +1524,14 @@ void cpu_probe(void)
else
c->srsets = 1;
+ if (cpu_has_mips_r6)
+ elf_hwcap |= HWCAP_MIPS_R6;
+
if (cpu_has_msa) {
c->msa_id = cpu_get_msa_id();
WARN(c->msa_id & MSA_IR_WRPF,
"Vector register partitioning unimplemented!");
+ elf_hwcap |= HWCAP_MIPS_MSA;
}
cpu_probe_vmbits(c);
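The new elf_hwcap word (with HWCAP_MIPS_R6 and HWCAP_MIPS_MSA set above) reaches userland through the ELF auxiliary vector. A minimal consumer, assuming the bit values from the uapi asm/hwcap.h introduced alongside this change:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_MIPS_R6
# define HWCAP_MIPS_R6	(1 << 0)	/* per arch/mips/include/uapi/asm/hwcap.h */
# define HWCAP_MIPS_MSA	(1 << 1)
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("R6:  %s\n", (hwcap & HWCAP_MIPS_R6) ? "yes" : "no");
	printf("MSA: %s\n", (hwcap & HWCAP_MIPS_MSA) ? "yes" : "no");
	return 0;
}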
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index e4f62b7875d2..ab1478d5a4db 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -196,6 +196,7 @@ void __init check_wait(void)
case CPU_INTERAPTIV:
case CPU_M5150:
case CPU_QEMU_GENERIC:
+ case CPU_I6400:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 85bbe9b96759..b8ceee576cdf 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -15,11 +15,131 @@
void __iomem *mips_cm_base;
void __iomem *mips_cm_l2sync_base;
+int mips_cm_is64;
+
+static char *cm2_tr[8] = {
+ "mem", "gcr", "gic", "mmio",
+ "0x04", "cpc", "0x06", "0x07"
+};
+
+/* CM3 Tag ECC transation type */
+static char *cm3_tr[16] = {
+ [0x0] = "ReqNoData",
+ [0x1] = "0x1",
+ [0x2] = "ReqWData",
+ [0x3] = "0x3",
+ [0x4] = "IReqNoResp",
+ [0x5] = "IReqWResp",
+ [0x6] = "IReqNoRespDat",
+ [0x7] = "IReqWRespDat",
+ [0x8] = "RespNoData",
+ [0x9] = "RespDataFol",
+ [0xa] = "RespWData",
+ [0xb] = "RespDataOnly",
+ [0xc] = "IRespNoData",
+ [0xd] = "IRespDataFol",
+ [0xe] = "IRespWData",
+ [0xf] = "IRespDataOnly"
+};
+
+static char *cm2_cmd[32] = {
+ [0x00] = "0x00",
+ [0x01] = "Legacy Write",
+ [0x02] = "Legacy Read",
+ [0x03] = "0x03",
+ [0x04] = "0x04",
+ [0x05] = "0x05",
+ [0x06] = "0x06",
+ [0x07] = "0x07",
+ [0x08] = "Coherent Read Own",
+ [0x09] = "Coherent Read Share",
+ [0x0a] = "Coherent Read Discard",
+ [0x0b] = "Coherent Ready Share Always",
+ [0x0c] = "Coherent Upgrade",
+ [0x0d] = "Coherent Writeback",
+ [0x0e] = "0x0e",
+ [0x0f] = "0x0f",
+ [0x10] = "Coherent Copyback",
+ [0x11] = "Coherent Copyback Invalidate",
+ [0x12] = "Coherent Invalidate",
+ [0x13] = "Coherent Write Invalidate",
+ [0x14] = "Coherent Completion Sync",
+ [0x15] = "0x15",
+ [0x16] = "0x16",
+ [0x17] = "0x17",
+ [0x18] = "0x18",
+ [0x19] = "0x19",
+ [0x1a] = "0x1a",
+ [0x1b] = "0x1b",
+ [0x1c] = "0x1c",
+ [0x1d] = "0x1d",
+ [0x1e] = "0x1e",
+ [0x1f] = "0x1f"
+};
+
+/* CM3 Tag ECC command type */
+static char *cm3_cmd[16] = {
+ [0x0] = "Legacy Read",
+ [0x1] = "Legacy Write",
+ [0x2] = "Coherent Read Own",
+ [0x3] = "Coherent Read Share",
+ [0x4] = "Coherent Read Discard",
+ [0x5] = "Coherent Evicted",
+ [0x6] = "Coherent Upgrade",
+ [0x7] = "Coherent Upgrade for Store Conditional",
+ [0x8] = "Coherent Writeback",
+ [0x9] = "Coherent Write Invalidate",
+ [0xa] = "0xa",
+ [0xb] = "0xb",
+ [0xc] = "0xc",
+ [0xd] = "0xd",
+ [0xe] = "0xe",
+ [0xf] = "0xf"
+};
+
+/* CM3 Tag ECC command group */
+static char *cm3_cmd_group[8] = {
+ [0x0] = "Normal",
+ [0x1] = "Registers",
+ [0x2] = "TLB",
+ [0x3] = "0x3",
+ [0x4] = "L1I",
+ [0x5] = "L1D",
+ [0x6] = "L3",
+ [0x7] = "L2"
+};
+
+static char *cm2_core[8] = {
+ "Invalid/OK", "Invalid/Data",
+ "Shared/OK", "Shared/Data",
+ "Modified/OK", "Modified/Data",
+ "Exclusive/OK", "Exclusive/Data"
+};
+
+static char *cm2_causes[32] = {
+ "None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+ "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+ "0x08", "0x09", "0x0a", "0x0b",
+ "0x0c", "0x0d", "0x0e", "0x0f",
+ "0x10", "0x11", "0x12", "0x13",
+ "0x14", "0x15", "0x16", "INTVN_WR_ERR",
+ "INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+ "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static char *cm3_causes[32] = {
+ "0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
+ "MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
+ "CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
+ "0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
+ "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
+ "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
+};
phys_addr_t __mips_cm_phys_base(void)
{
u32 config3 = read_c0_config3();
- u32 cmgcr;
+ unsigned long cmgcr;
/* Check the CMGCRBase register is implemented */
if (!(config3 & MIPS_CONF3_CMGCR))
@@ -81,6 +201,13 @@ int mips_cm_probe(void)
phys_addr_t addr;
u32 base_reg;
+ /*
+ * No need to probe again if we have already been
+ * here before.
+ */
+ if (mips_cm_base)
+ return 0;
+
addr = mips_cm_phys_base();
BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
if (!addr)
@@ -117,5 +244,133 @@ int mips_cm_probe(void)
/* probe for an L2-only sync region */
mips_cm_probe_l2sync();
+ /* determine register width for this CM */
+ mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+
return 0;
}
+
+void mips_cm_error_report(void)
+{
+ unsigned long revision = mips_cm_revision();
+ /*
+ * CM3 has a 64-bit Error cause register with 0:57 containing the error
+ * info and 63:58 the error type. For old CMs, everything is contained
+ * in a single 32-bit register (0:26 and 31:27 respectively). Even
+ * though the cm_error is u64, we will simply ignore the upper word
+ * for CM2.
+ */
+ u64 cm_error = read_gcr_error_cause();
+ int cm_error_cause_sft = CM_GCR_ERROR_CAUSE_ERRTYPE_SHF +
+ ((revision >= CM_REV_CM3) ? 31 : 0);
+ unsigned long cm_addr = read_gcr_error_addr();
+ unsigned long cm_other = read_gcr_error_mult();
+ int ocause, cause;
+ char buf[256];
+
+ if (!mips_cm_present())
+ return;
+
+ cause = cm_error >> cm_error_cause_sft;
+
+ if (!cause)
+ /* All good */
+ return;
+
+ ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+ if (revision < CM_REV_CM3) { /* CM2 */
+ if (cause < 16) {
+ unsigned long cca_bits = (cm_error >> 15) & 7;
+ unsigned long tr_bits = (cm_error >> 12) & 7;
+ unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+ unsigned long stag_bits = (cm_error >> 3) & 15;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "CCA=%lu TR=%s MCmd=%s STag=%lu "
+ "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
+ cm2_cmd[cmd_bits], stag_bits, sport_bits);
+ } else {
+ /* glob state & sresp together */
+ unsigned long c3_bits = (cm_error >> 18) & 7;
+ unsigned long c2_bits = (cm_error >> 15) & 7;
+ unsigned long c1_bits = (cm_error >> 12) & 7;
+ unsigned long c0_bits = (cm_error >> 9) & 7;
+ unsigned long sc_bit = (cm_error >> 8) & 1;
+ unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "C3=%s C2=%s C1=%s C0=%s SC=%s "
+ "MCmd=%s SPort=%lu\n",
+ cm2_core[c3_bits], cm2_core[c2_bits],
+ cm2_core[c1_bits], cm2_core[c0_bits],
+ sc_bit ? "True" : "False",
+ cm2_cmd[cmd_bits], sport_bits);
+ }
+ pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+ cm2_causes[cause], buf);
+ pr_err("CM_ADDR =%08lx\n", cm_addr);
+ pr_err("CM_OTHER=%08lx %s\n", cm_other, cm2_causes[ocause]);
+ } else { /* CM3 */
+ /* Used by cause == {1,2,3} */
+ unsigned long core_id_bits = (cm_error >> 22) & 0xf;
+ unsigned long vp_id_bits = (cm_error >> 18) & 0xf;
+ unsigned long cmd_bits = (cm_error >> 14) & 0xf;
+ unsigned long cmd_group_bits = (cm_error >> 11) & 0xf;
+ unsigned long cm3_cca_bits = (cm_error >> 8) & 7;
+ unsigned long mcp_bits = (cm_error >> 5) & 0xf;
+ unsigned long cm3_tr_bits = (cm_error >> 1) & 0xf;
+ unsigned long sched_bit = cm_error & 0x1;
+
+ if (cause == 1 || cause == 3) { /* Tag ECC */
+ unsigned long tag_ecc = (cm_error >> 57) & 0x1;
+ unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
+ unsigned long dword_bits = (cm_error >> 49) & 0xff;
+ unsigned long data_way_bits = (cm_error >> 45) & 0xf;
+ unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
+ unsigned long bank_bit = (cm_error >> 28) & 0x1;
+ snprintf(buf, sizeof(buf),
+ "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu)"
+ "Bank=%lu CoreID=%lu VPID=%lu Command=%s"
+ "Command Group=%s CCA=%lu MCP=%d"
+ "Transaction type=%s Scheduler=%lu\n",
+ tag_ecc ? "TAG" : "DATA",
+ tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
+ data_way_bits, bank_bit, dword_bits,
+ data_sets_bits,
+ core_id_bits, vp_id_bits,
+ cm3_cmd[cmd_bits],
+ cm3_cmd_group[cmd_group_bits],
+ cm3_cca_bits, 1 << mcp_bits,
+ cm3_tr[cm3_tr_bits], sched_bit);
+ } else if (cause == 2) {
+ unsigned long data_error_type = (cm_error >> 41) & 0xfff;
+ unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
+ unsigned long data_decode_group = (cm_error >> 34) & 0x7;
+ unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
+
+ snprintf(buf, sizeof(buf),
+ "Decode Request Error: Type=%lu, Command=%lu"
+ "Command Group=%lu Destination ID=%lu"
+ "CoreID=%lu VPID=%lu Command=%s"
+ "Command Group=%s CCA=%lu MCP=%d"
+ "Transaction type=%s Scheduler=%lu\n",
+ data_error_type, data_decode_cmd,
+ data_decode_group, data_decode_destination_id,
+ core_id_bits, vp_id_bits,
+ cm3_cmd[cmd_bits],
+ cm3_cmd_group[cmd_group_bits],
+ cm3_cca_bits, 1 << mcp_bits,
+ cm3_tr[cm3_tr_bits], sched_bit);
+ }
+
+ pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
+ cm3_causes[cause], buf);
+ pr_err("CM_ADDR =%lx\n", cm_addr);
+ pr_err("CM_OTHER=%lx %s\n", cm_other, cm3_causes[ocause]);
+ }
+
+ /* reprime cause register */
+ write_gcr_error_cause(0);
+}
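mips_cm_error_report() above decodes everything with bare shift/mask pairs; the "+ 31" applied to the 32-bit ERRTYPE shift moves the cause field up to bits 63:58 of CM3's 64-bit register. An illustrative helper (not from the kernel sources) for that decode style:

#include <stdint.h>

/* Extract bits [hi:lo] of a 64-bit register value; assumes hi - lo < 63,
 * so the shift below cannot overflow. */
static inline uint64_t bits(uint64_t val, unsigned int hi, unsigned int lo)
{
	return (val >> lo) & ((1ull << (hi - lo + 1)) - 1);
}

/* CM3 example: cause = bits(cm_error, 63, 58), matching the
 * ERRTYPE_SHF + 31 computation in the code above. */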
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 11964501c4b0..8af4d627b68b 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,9 +21,16 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
-phys_addr_t __weak mips_cpc_phys_base(void)
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present.
+ */
+static phys_addr_t mips_cpc_phys_base(void)
{
- u32 cpc_base;
+ unsigned long cpc_base;
if (!mips_cm_present())
return 0;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index cc1b6fadf089..d7b8dd43147a 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1556,6 +1556,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_P5600:
+ case CPU_I6400:
/* 8-bit event numbers */
raw_id = config & 0x1ff;
base_id = raw_id & 0xff;
@@ -1717,6 +1718,11 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
+ case CPU_I6400:
+ mipspmu.name = "mips/I6400";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 06147179a175..f63a289977cc 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -267,6 +267,7 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
/* CPUs which do not require the workaround */
case CPU_P5600:
+ case CPU_I6400:
return 0;
default:
@@ -671,6 +672,7 @@ static int __init cps_pm_init(void)
case CPU_PROAPTIV:
case CPU_M5150:
case CPU_P5600:
+ case CPU_I6400:
stype_intervention = 0x2;
stype_memory = 0x3;
stype_ordering = 0x10;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index e933a309f2ea..4f0ac78d17f1 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
+#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
@@ -490,6 +491,93 @@ enum mips_regset {
REGSET_FPR,
};
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(reg, r) { \
+ .name = #reg, \
+ .offset = offsetof(struct pt_regs, r) \
+}
+
+#define REG_OFFSET_END { \
+ .name = NULL, \
+ .offset = 0 \
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(r0, regs[0]),
+ REG_OFFSET_NAME(r1, regs[1]),
+ REG_OFFSET_NAME(r2, regs[2]),
+ REG_OFFSET_NAME(r3, regs[3]),
+ REG_OFFSET_NAME(r4, regs[4]),
+ REG_OFFSET_NAME(r5, regs[5]),
+ REG_OFFSET_NAME(r6, regs[6]),
+ REG_OFFSET_NAME(r7, regs[7]),
+ REG_OFFSET_NAME(r8, regs[8]),
+ REG_OFFSET_NAME(r9, regs[9]),
+ REG_OFFSET_NAME(r10, regs[10]),
+ REG_OFFSET_NAME(r11, regs[11]),
+ REG_OFFSET_NAME(r12, regs[12]),
+ REG_OFFSET_NAME(r13, regs[13]),
+ REG_OFFSET_NAME(r14, regs[14]),
+ REG_OFFSET_NAME(r15, regs[15]),
+ REG_OFFSET_NAME(r16, regs[16]),
+ REG_OFFSET_NAME(r17, regs[17]),
+ REG_OFFSET_NAME(r18, regs[18]),
+ REG_OFFSET_NAME(r19, regs[19]),
+ REG_OFFSET_NAME(r20, regs[20]),
+ REG_OFFSET_NAME(r21, regs[21]),
+ REG_OFFSET_NAME(r22, regs[22]),
+ REG_OFFSET_NAME(r23, regs[23]),
+ REG_OFFSET_NAME(r24, regs[24]),
+ REG_OFFSET_NAME(r25, regs[25]),
+ REG_OFFSET_NAME(r26, regs[26]),
+ REG_OFFSET_NAME(r27, regs[27]),
+ REG_OFFSET_NAME(r28, regs[28]),
+ REG_OFFSET_NAME(r29, regs[29]),
+ REG_OFFSET_NAME(r30, regs[30]),
+ REG_OFFSET_NAME(r31, regs[31]),
+ REG_OFFSET_NAME(c0_status, cp0_status),
+ REG_OFFSET_NAME(hi, hi),
+ REG_OFFSET_NAME(lo, lo),
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ REG_OFFSET_NAME(acx, acx),
+#endif
+ REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
+ REG_OFFSET_NAME(c0_cause, cp0_cause),
+ REG_OFFSET_NAME(c0_epc, cp0_epc),
+#ifdef CONFIG_MIPS_MT_SMTC
+ REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus),
+#endif
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ REG_OFFSET_NAME(mpl0, mpl[0]),
+ REG_OFFSET_NAME(mpl1, mpl[1]),
+ REG_OFFSET_NAME(mpl2, mpl[2]),
+ REG_OFFSET_NAME(mtp0, mtp[0]),
+ REG_OFFSET_NAME(mtp1, mtp[1]),
+ REG_OFFSET_NAME(mtp2, mtp[2]),
+#endif
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL;
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
static const struct user_regset mips_regsets[] = {
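regs_query_register_offset() above exists to serve name-based register access (e.g. the kprobes/uprobes fetch-arg code): resolve the name to a pt_regs offset once, then index into any saved register frame. A hedged usage sketch (read_named_reg is hypothetical, mirroring the regs_get_register()-style helpers used on other architectures):

static unsigned long read_named_reg(struct pt_regs *regs, const char *name)
{
	int off = regs_query_register_offset(name);	/* e.g. "c0_epc" */

	if (off < 0)
		return 0;	/* unknown register name */

	return *(unsigned long *)((char *)regs + off);
}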
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 1d88af26ba82..f09546ee2cdc 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*/
#include <asm/asm.h>
+#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
@@ -35,6 +36,14 @@
.set noreorder
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
@@ -54,117 +63,60 @@ LEAF(_save_fp_context)
nop
#endif
/* Store the 16 odd double precision registers */
- EX sdc1 $f1, SC_FPREGS+8(a0)
- EX sdc1 $f3, SC_FPREGS+24(a0)
- EX sdc1 $f5, SC_FPREGS+40(a0)
- EX sdc1 $f7, SC_FPREGS+56(a0)
- EX sdc1 $f9, SC_FPREGS+72(a0)
- EX sdc1 $f11, SC_FPREGS+88(a0)
- EX sdc1 $f13, SC_FPREGS+104(a0)
- EX sdc1 $f15, SC_FPREGS+120(a0)
- EX sdc1 $f17, SC_FPREGS+136(a0)
- EX sdc1 $f19, SC_FPREGS+152(a0)
- EX sdc1 $f21, SC_FPREGS+168(a0)
- EX sdc1 $f23, SC_FPREGS+184(a0)
- EX sdc1 $f25, SC_FPREGS+200(a0)
- EX sdc1 $f27, SC_FPREGS+216(a0)
- EX sdc1 $f29, SC_FPREGS+232(a0)
- EX sdc1 $f31, SC_FPREGS+248(a0)
+ EX sdc1 $f1, 8(a0)
+ EX sdc1 $f3, 24(a0)
+ EX sdc1 $f5, 40(a0)
+ EX sdc1 $f7, 56(a0)
+ EX sdc1 $f9, 72(a0)
+ EX sdc1 $f11, 88(a0)
+ EX sdc1 $f13, 104(a0)
+ EX sdc1 $f15, 120(a0)
+ EX sdc1 $f17, 136(a0)
+ EX sdc1 $f19, 152(a0)
+ EX sdc1 $f21, 168(a0)
+ EX sdc1 $f23, 184(a0)
+ EX sdc1 $f25, 200(a0)
+ EX sdc1 $f27, 216(a0)
+ EX sdc1 $f29, 232(a0)
+ EX sdc1 $f31, 248(a0)
1: .set pop
#endif
.set push
SET_HARDFLOAT
/* Store the 16 even double precision registers */
- EX sdc1 $f0, SC_FPREGS+0(a0)
- EX sdc1 $f2, SC_FPREGS+16(a0)
- EX sdc1 $f4, SC_FPREGS+32(a0)
- EX sdc1 $f6, SC_FPREGS+48(a0)
- EX sdc1 $f8, SC_FPREGS+64(a0)
- EX sdc1 $f10, SC_FPREGS+80(a0)
- EX sdc1 $f12, SC_FPREGS+96(a0)
- EX sdc1 $f14, SC_FPREGS+112(a0)
- EX sdc1 $f16, SC_FPREGS+128(a0)
- EX sdc1 $f18, SC_FPREGS+144(a0)
- EX sdc1 $f20, SC_FPREGS+160(a0)
- EX sdc1 $f22, SC_FPREGS+176(a0)
- EX sdc1 $f24, SC_FPREGS+192(a0)
- EX sdc1 $f26, SC_FPREGS+208(a0)
- EX sdc1 $f28, SC_FPREGS+224(a0)
- EX sdc1 $f30, SC_FPREGS+240(a0)
- EX sw t1, SC_FPC_CSR(a0)
+ EX sdc1 $f0, 0(a0)
+ EX sdc1 $f2, 16(a0)
+ EX sdc1 $f4, 32(a0)
+ EX sdc1 $f6, 48(a0)
+ EX sdc1 $f8, 64(a0)
+ EX sdc1 $f10, 80(a0)
+ EX sdc1 $f12, 96(a0)
+ EX sdc1 $f14, 112(a0)
+ EX sdc1 $f16, 128(a0)
+ EX sdc1 $f18, 144(a0)
+ EX sdc1 $f20, 160(a0)
+ EX sdc1 $f22, 176(a0)
+ EX sdc1 $f24, 192(a0)
+ EX sdc1 $f26, 208(a0)
+ EX sdc1 $f28, 224(a0)
+ EX sdc1 $f30, 240(a0)
+ EX sw t1, 0(a1)
jr ra
li v0, 0 # success
.set pop
END(_save_fp_context)
-#ifdef CONFIG_MIPS32_COMPAT
- /* Save 32-bit process floating point context */
-LEAF(_save_fp_context32)
- .set push
- .set MIPS_ISA_ARCH_LEVEL_RAW
- SET_HARDFLOAT
- cfc1 t1, fcr31
-
-#ifndef CONFIG_CPU_MIPS64_R6
- mfc0 t0, CP0_STATUS
- sll t0, t0, 5
- bgez t0, 1f # skip storing odd if FR=0
- nop
-#endif
-
- /* Store the 16 odd double precision registers */
- EX sdc1 $f1, SC32_FPREGS+8(a0)
- EX sdc1 $f3, SC32_FPREGS+24(a0)
- EX sdc1 $f5, SC32_FPREGS+40(a0)
- EX sdc1 $f7, SC32_FPREGS+56(a0)
- EX sdc1 $f9, SC32_FPREGS+72(a0)
- EX sdc1 $f11, SC32_FPREGS+88(a0)
- EX sdc1 $f13, SC32_FPREGS+104(a0)
- EX sdc1 $f15, SC32_FPREGS+120(a0)
- EX sdc1 $f17, SC32_FPREGS+136(a0)
- EX sdc1 $f19, SC32_FPREGS+152(a0)
- EX sdc1 $f21, SC32_FPREGS+168(a0)
- EX sdc1 $f23, SC32_FPREGS+184(a0)
- EX sdc1 $f25, SC32_FPREGS+200(a0)
- EX sdc1 $f27, SC32_FPREGS+216(a0)
- EX sdc1 $f29, SC32_FPREGS+232(a0)
- EX sdc1 $f31, SC32_FPREGS+248(a0)
-
- /* Store the 16 even double precision registers */
-1: EX sdc1 $f0, SC32_FPREGS+0(a0)
- EX sdc1 $f2, SC32_FPREGS+16(a0)
- EX sdc1 $f4, SC32_FPREGS+32(a0)
- EX sdc1 $f6, SC32_FPREGS+48(a0)
- EX sdc1 $f8, SC32_FPREGS+64(a0)
- EX sdc1 $f10, SC32_FPREGS+80(a0)
- EX sdc1 $f12, SC32_FPREGS+96(a0)
- EX sdc1 $f14, SC32_FPREGS+112(a0)
- EX sdc1 $f16, SC32_FPREGS+128(a0)
- EX sdc1 $f18, SC32_FPREGS+144(a0)
- EX sdc1 $f20, SC32_FPREGS+160(a0)
- EX sdc1 $f22, SC32_FPREGS+176(a0)
- EX sdc1 $f24, SC32_FPREGS+192(a0)
- EX sdc1 $f26, SC32_FPREGS+208(a0)
- EX sdc1 $f28, SC32_FPREGS+224(a0)
- EX sdc1 $f30, SC32_FPREGS+240(a0)
- EX sw t1, SC32_FPC_CSR(a0)
- cfc1 t0, $0 # implementation/version
- EX sw t0, SC32_FPC_EIR(a0)
- .set pop
-
- jr ra
- li v0, 0 # success
- END(_save_fp_context32)
-#endif
-
-/*
- * Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
- EX lw t1, SC_FPC_CSR(a0)
+ EX lw t1, 0(a1)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
defined(CONFIG_CPU_MIPS32_R6)
@@ -178,101 +130,231 @@ LEAF(_restore_fp_context)
bgez t0, 1f # skip loading odd if FR=0
nop
#endif
- EX ldc1 $f1, SC_FPREGS+8(a0)
- EX ldc1 $f3, SC_FPREGS+24(a0)
- EX ldc1 $f5, SC_FPREGS+40(a0)
- EX ldc1 $f7, SC_FPREGS+56(a0)
- EX ldc1 $f9, SC_FPREGS+72(a0)
- EX ldc1 $f11, SC_FPREGS+88(a0)
- EX ldc1 $f13, SC_FPREGS+104(a0)
- EX ldc1 $f15, SC_FPREGS+120(a0)
- EX ldc1 $f17, SC_FPREGS+136(a0)
- EX ldc1 $f19, SC_FPREGS+152(a0)
- EX ldc1 $f21, SC_FPREGS+168(a0)
- EX ldc1 $f23, SC_FPREGS+184(a0)
- EX ldc1 $f25, SC_FPREGS+200(a0)
- EX ldc1 $f27, SC_FPREGS+216(a0)
- EX ldc1 $f29, SC_FPREGS+232(a0)
- EX ldc1 $f31, SC_FPREGS+248(a0)
+ EX ldc1 $f1, 8(a0)
+ EX ldc1 $f3, 24(a0)
+ EX ldc1 $f5, 40(a0)
+ EX ldc1 $f7, 56(a0)
+ EX ldc1 $f9, 72(a0)
+ EX ldc1 $f11, 88(a0)
+ EX ldc1 $f13, 104(a0)
+ EX ldc1 $f15, 120(a0)
+ EX ldc1 $f17, 136(a0)
+ EX ldc1 $f19, 152(a0)
+ EX ldc1 $f21, 168(a0)
+ EX ldc1 $f23, 184(a0)
+ EX ldc1 $f25, 200(a0)
+ EX ldc1 $f27, 216(a0)
+ EX ldc1 $f29, 232(a0)
+ EX ldc1 $f31, 248(a0)
1: .set pop
#endif
.set push
SET_HARDFLOAT
- EX ldc1 $f0, SC_FPREGS+0(a0)
- EX ldc1 $f2, SC_FPREGS+16(a0)
- EX ldc1 $f4, SC_FPREGS+32(a0)
- EX ldc1 $f6, SC_FPREGS+48(a0)
- EX ldc1 $f8, SC_FPREGS+64(a0)
- EX ldc1 $f10, SC_FPREGS+80(a0)
- EX ldc1 $f12, SC_FPREGS+96(a0)
- EX ldc1 $f14, SC_FPREGS+112(a0)
- EX ldc1 $f16, SC_FPREGS+128(a0)
- EX ldc1 $f18, SC_FPREGS+144(a0)
- EX ldc1 $f20, SC_FPREGS+160(a0)
- EX ldc1 $f22, SC_FPREGS+176(a0)
- EX ldc1 $f24, SC_FPREGS+192(a0)
- EX ldc1 $f26, SC_FPREGS+208(a0)
- EX ldc1 $f28, SC_FPREGS+224(a0)
- EX ldc1 $f30, SC_FPREGS+240(a0)
+ EX ldc1 $f0, 0(a0)
+ EX ldc1 $f2, 16(a0)
+ EX ldc1 $f4, 32(a0)
+ EX ldc1 $f6, 48(a0)
+ EX ldc1 $f8, 64(a0)
+ EX ldc1 $f10, 80(a0)
+ EX ldc1 $f12, 96(a0)
+ EX ldc1 $f14, 112(a0)
+ EX ldc1 $f16, 128(a0)
+ EX ldc1 $f18, 144(a0)
+ EX ldc1 $f20, 160(a0)
+ EX ldc1 $f22, 176(a0)
+ EX ldc1 $f24, 192(a0)
+ EX ldc1 $f26, 208(a0)
+ EX ldc1 $f28, 224(a0)
+ EX ldc1 $f30, 240(a0)
ctc1 t1, fcr31
.set pop
jr ra
li v0, 0 # success
END(_restore_fp_context)
-#ifdef CONFIG_MIPS32_COMPAT
-LEAF(_restore_fp_context32)
- /* Restore an o32 sigcontext. */
- .set push
- SET_HARDFLOAT
- EX lw t1, SC32_FPC_CSR(a0)
+#ifdef CONFIG_CPU_HAS_MSA
-#ifndef CONFIG_CPU_MIPS64_R6
- mfc0 t0, CP0_STATUS
- sll t0, t0, 5
- bgez t0, 1f # skip loading odd if FR=0
+ .macro op_one_wr op, idx, base
+ .align 4
+\idx: \op \idx, 0, \base
+ jr ra
nop
-#endif
+ .endm
- EX ldc1 $f1, SC32_FPREGS+8(a0)
- EX ldc1 $f3, SC32_FPREGS+24(a0)
- EX ldc1 $f5, SC32_FPREGS+40(a0)
- EX ldc1 $f7, SC32_FPREGS+56(a0)
- EX ldc1 $f9, SC32_FPREGS+72(a0)
- EX ldc1 $f11, SC32_FPREGS+88(a0)
- EX ldc1 $f13, SC32_FPREGS+104(a0)
- EX ldc1 $f15, SC32_FPREGS+120(a0)
- EX ldc1 $f17, SC32_FPREGS+136(a0)
- EX ldc1 $f19, SC32_FPREGS+152(a0)
- EX ldc1 $f21, SC32_FPREGS+168(a0)
- EX ldc1 $f23, SC32_FPREGS+184(a0)
- EX ldc1 $f25, SC32_FPREGS+200(a0)
- EX ldc1 $f27, SC32_FPREGS+216(a0)
- EX ldc1 $f29, SC32_FPREGS+232(a0)
- EX ldc1 $f31, SC32_FPREGS+248(a0)
+ .macro op_msa_wr name, op
+LEAF(\name)
+ .set push
+ .set noreorder
+ sll t0, a0, 4
+ PTR_LA t1, 0f
+ PTR_ADDU t0, t0, t1
+ jr t0
+ nop
+ op_one_wr \op, 0, a1
+ op_one_wr \op, 1, a1
+ op_one_wr \op, 2, a1
+ op_one_wr \op, 3, a1
+ op_one_wr \op, 4, a1
+ op_one_wr \op, 5, a1
+ op_one_wr \op, 6, a1
+ op_one_wr \op, 7, a1
+ op_one_wr \op, 8, a1
+ op_one_wr \op, 9, a1
+ op_one_wr \op, 10, a1
+ op_one_wr \op, 11, a1
+ op_one_wr \op, 12, a1
+ op_one_wr \op, 13, a1
+ op_one_wr \op, 14, a1
+ op_one_wr \op, 15, a1
+ op_one_wr \op, 16, a1
+ op_one_wr \op, 17, a1
+ op_one_wr \op, 18, a1
+ op_one_wr \op, 19, a1
+ op_one_wr \op, 20, a1
+ op_one_wr \op, 21, a1
+ op_one_wr \op, 22, a1
+ op_one_wr \op, 23, a1
+ op_one_wr \op, 24, a1
+ op_one_wr \op, 25, a1
+ op_one_wr \op, 26, a1
+ op_one_wr \op, 27, a1
+ op_one_wr \op, 28, a1
+ op_one_wr \op, 29, a1
+ op_one_wr \op, 30, a1
+ op_one_wr \op, 31, a1
+ .set pop
+ END(\name)
+ .endm
-1: EX ldc1 $f0, SC32_FPREGS+0(a0)
- EX ldc1 $f2, SC32_FPREGS+16(a0)
- EX ldc1 $f4, SC32_FPREGS+32(a0)
- EX ldc1 $f6, SC32_FPREGS+48(a0)
- EX ldc1 $f8, SC32_FPREGS+64(a0)
- EX ldc1 $f10, SC32_FPREGS+80(a0)
- EX ldc1 $f12, SC32_FPREGS+96(a0)
- EX ldc1 $f14, SC32_FPREGS+112(a0)
- EX ldc1 $f16, SC32_FPREGS+128(a0)
- EX ldc1 $f18, SC32_FPREGS+144(a0)
- EX ldc1 $f20, SC32_FPREGS+160(a0)
- EX ldc1 $f22, SC32_FPREGS+176(a0)
- EX ldc1 $f24, SC32_FPREGS+192(a0)
- EX ldc1 $f26, SC32_FPREGS+208(a0)
- EX ldc1 $f28, SC32_FPREGS+224(a0)
- EX ldc1 $f30, SC32_FPREGS+240(a0)
- ctc1 t1, fcr31
+ op_msa_wr read_msa_wr_b, st_b
+ op_msa_wr read_msa_wr_h, st_h
+ op_msa_wr read_msa_wr_w, st_w
+ op_msa_wr read_msa_wr_d, st_d
+
+ op_msa_wr write_msa_wr_b, ld_b
+ op_msa_wr write_msa_wr_h, ld_h
+ op_msa_wr write_msa_wr_w, ld_w
+ op_msa_wr write_msa_wr_d, ld_d
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro save_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ copy_u_d \wr, 1
+ EX sd $1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ copy_u_w \wr, 2
+ EX sw $1, \off(\base)
+ copy_u_w \wr, 3
+ EX sw $1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ copy_u_w \wr, 2
+ EX sw $1, (\off+4)(\base)
+ copy_u_w \wr, 3
+ EX sw $1, \off(\base)
+#endif
+ .set pop
+ .endm
+
+LEAF(_save_msa_all_upper)
+ save_msa_upper 0, 0x00, a0
+ save_msa_upper 1, 0x08, a0
+ save_msa_upper 2, 0x10, a0
+ save_msa_upper 3, 0x18, a0
+ save_msa_upper 4, 0x20, a0
+ save_msa_upper 5, 0x28, a0
+ save_msa_upper 6, 0x30, a0
+ save_msa_upper 7, 0x38, a0
+ save_msa_upper 8, 0x40, a0
+ save_msa_upper 9, 0x48, a0
+ save_msa_upper 10, 0x50, a0
+ save_msa_upper 11, 0x58, a0
+ save_msa_upper 12, 0x60, a0
+ save_msa_upper 13, 0x68, a0
+ save_msa_upper 14, 0x70, a0
+ save_msa_upper 15, 0x78, a0
+ save_msa_upper 16, 0x80, a0
+ save_msa_upper 17, 0x88, a0
+ save_msa_upper 18, 0x90, a0
+ save_msa_upper 19, 0x98, a0
+ save_msa_upper 20, 0xa0, a0
+ save_msa_upper 21, 0xa8, a0
+ save_msa_upper 22, 0xb0, a0
+ save_msa_upper 23, 0xb8, a0
+ save_msa_upper 24, 0xc0, a0
+ save_msa_upper 25, 0xc8, a0
+ save_msa_upper 26, 0xd0, a0
+ save_msa_upper 27, 0xd8, a0
+ save_msa_upper 28, 0xe0, a0
+ save_msa_upper 29, 0xe8, a0
+ save_msa_upper 30, 0xf0, a0
+ save_msa_upper 31, 0xf8, a0
jr ra
- li v0, 0 # success
- .set pop
- END(_restore_fp_context32)
+ li v0, 0
+ END(_save_msa_all_upper)
+
+ .macro restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ EX ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ EX lw $1, \off(\base)
+ insert_w \wr, 2
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ EX lw $1, \off(\base)
+ insert_w \wr, 3
#endif
+ .set pop
+ .endm
+
+LEAF(_restore_msa_all_upper)
+ restore_msa_upper 0, 0x00, a0
+ restore_msa_upper 1, 0x08, a0
+ restore_msa_upper 2, 0x10, a0
+ restore_msa_upper 3, 0x18, a0
+ restore_msa_upper 4, 0x20, a0
+ restore_msa_upper 5, 0x28, a0
+ restore_msa_upper 6, 0x30, a0
+ restore_msa_upper 7, 0x38, a0
+ restore_msa_upper 8, 0x40, a0
+ restore_msa_upper 9, 0x48, a0
+ restore_msa_upper 10, 0x50, a0
+ restore_msa_upper 11, 0x58, a0
+ restore_msa_upper 12, 0x60, a0
+ restore_msa_upper 13, 0x68, a0
+ restore_msa_upper 14, 0x70, a0
+ restore_msa_upper 15, 0x78, a0
+ restore_msa_upper 16, 0x80, a0
+ restore_msa_upper 17, 0x88, a0
+ restore_msa_upper 18, 0x90, a0
+ restore_msa_upper 19, 0x98, a0
+ restore_msa_upper 20, 0xa0, a0
+ restore_msa_upper 21, 0xa8, a0
+ restore_msa_upper 22, 0xb0, a0
+ restore_msa_upper 23, 0xb8, a0
+ restore_msa_upper 24, 0xc0, a0
+ restore_msa_upper 25, 0xc8, a0
+ restore_msa_upper 26, 0xd0, a0
+ restore_msa_upper 27, 0xd8, a0
+ restore_msa_upper 28, 0xe0, a0
+ restore_msa_upper 29, 0xe8, a0
+ restore_msa_upper 30, 0xf0, a0
+ restore_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_restore_msa_all_upper)
+
+#endif /* CONFIG_CPU_HAS_MSA */
.set reorder
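The op_msa_wr macros above build a computed jump table: every per-register stub is padded to 16 bytes by ".align 4", so the stub for vector register n lives at the "0:" label plus (n << 4), and "sll t0, a0, 4" followed by "jr t0" dispatches straight into it. A standalone C illustration of just the indexing arithmetic (the base address is made up):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x80100000ul;	/* hypothetical address of the 0: label */
	unsigned int wr;

	for (wr = 0; wr < 4; wr++)
		printf("stub for w%u at %#lx\n", wr, base + ((unsigned long)wr << 4));
	return 0;
}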
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 04cbbde3521b..92cd0516ecf5 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -34,7 +34,7 @@
#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, s32 fp_save)
+ * struct thread_info *next_ti)
*/
.align 5
LEAF(resume)
@@ -43,45 +43,6 @@
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
- /*
- * Check whether we need to save any FP context. FP context is saved
- * iff the process has used the context with the scalar FPU or the MSA
- * ASE in the current time slice, as indicated by _TIF_USEDFPU and
- * _TIF_USEDMSA respectively. switch_to will have set fp_save
- * accordingly to an FP_SAVE_ enum value.
- */
- beqz a3, 2f
-
- /*
- * We do. Clear the saved CU1 bit for prev, such that next time it is
- * scheduled it will start in userland with the FPU disabled. If the
- * task uses the FPU then it will be enabled again via the do_cpu trap.
- * This allows us to lazily restore the FP context.
- */
- PTR_L t3, TASK_THREAD_INFO(a0)
- LONG_L t0, ST_OFF(t3)
- li t1, ~ST0_CU1
- and t0, t0, t1
- LONG_S t0, ST_OFF(t3)
-
- /* Check whether we're saving scalar or vector context. */
- bgtz a3, 1f
-
- /* Save 128b MSA vector context + scalar FP control & status. */
- .set push
- SET_HARDFLOAT
- cfc1 t1, fcr31
- msa_save_all a0
- .set pop /* SET_HARDFLOAT */
-
- sw t1, THREAD_FCR31(a0)
- b 2f
-
-1: /* Save 32b/64b scalar FP context. */
- fpu_save_double a0 t0 t1 # c0_status passed in t0
- # clobbers t1
-2:
-
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 0b85f827cd18..f50d48435c68 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -31,4 +31,13 @@ extern int fpcsr_pending(unsigned int __user *fpcsr);
#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *csr);
+
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+
#endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6a28c792d862..2fec67bfc457 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -21,6 +21,7 @@
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
+#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
@@ -38,20 +39,21 @@
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>
+#include <asm/msa.h>
#include "signal-common.h"
-static int (*save_fp_context)(struct sigcontext __user *sc);
-static int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
+static int (*save_fp_context)(void __user *sc);
+static int (*restore_fp_context)(void __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
+
+ /* Matches struct ucontext from its uc_mcontext field onwards */
struct sigcontext sf_sc;
sigset_t sf_mask;
+ unsigned long long sf_extcontext[0];
};
struct rt_sigframe {
@@ -65,43 +67,255 @@ struct rt_sigframe {
* Thread saved context copy to/from a signal context presumed to be on the
* user stack, and therefore accessed with appropriate macros from uaccess.h.
*/
-static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_to_sigcontext(void __user *sc)
{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
int i;
int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
- for (i = 0; i < NUM_FPU_REGS; i++) {
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
err |=
__put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
- &sc->sc_fpregs[i]);
+ &fpregs[i]);
}
- err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+ err |= __put_user(current->thread.fpu.fcr31, csr);
return err;
}
-static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_from_sigcontext(void __user *sc)
{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
int i;
int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
u64 fpr_val;
- for (i = 0; i < NUM_FPU_REGS; i++) {
- err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |= __get_user(fpr_val, &fpregs[i]);
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
}
- err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+ err |= __get_user(current->thread.fpu.fcr31, csr);
return err;
}
/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _save_fp_context(fpregs, csr);
+}
+
+static int restore_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _restore_fp_context(fpregs, csr);
+}
+
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+ struct ucontext __user *uc;
+
+ /*
+ * We can just pretend the sigcontext is always embedded in a struct
+ * ucontext here, because the offset from sigcontext to extended
+ * context is the same in the struct sigframe case.
+ */
+ uc = container_of(sc, struct ucontext, uc_mcontext);
+ return &uc->uc_extcontext;
+}
+
+static int save_msa_extcontext(void __user *buf)
+{
+ struct msa_extcontext __user *msa = buf;
+ uint64_t val;
+ int i, err;
+
+ if (!thread_msa_context_live())
+ return 0;
+
+ /*
+ * Ensure that we can't lose the live MSA context between checking
+ * for it & writing it to memory.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be saved to kernel memory
+ * and then copied to user memory. The save to kernel memory
+ * should already have been done when handling scalar FP
+ * context.
+ */
+ BUG_ON(config_enabled(CONFIG_EVA));
+
+ err = __put_user(read_msa_csr(), &msa->csr);
+ err |= _save_msa_all_upper(&msa->wr);
+
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+ err |= __put_user(val, &msa->wr[i]);
+ }
+ }
+
+ err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+ err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+ return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+ struct msa_extcontext __user *msa = buf;
+ unsigned long long val;
+ unsigned int csr;
+ int i, err;
+
+ if (size != sizeof(*msa))
+ return -EINVAL;
+
+ err = get_user(csr, &msa->csr);
+ if (err)
+ return err;
+
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be copied to kernel
+ * memory and later loaded to registers. The same is true of
+ * scalar FP context, so FPU & MSA should have already been
+ * disabled whilst handling scalar FP context.
+ */
+ BUG_ON(config_enabled(CONFIG_EVA));
+
+ write_msa_csr(csr);
+ err |= _restore_msa_all_upper(&msa->wr);
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ current->thread.fpu.msacsr = csr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(val, &msa->wr[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+ }
+ }
+
+ return err;
+}
+
+static int save_extcontext(void __user *buf)
+{
+ int sz;
+
+ sz = save_msa_extcontext(buf);
+ if (sz < 0)
+ return sz;
+ buf += sz;
+
+ /* If no context was saved then trivially return */
+ if (!sz)
+ return 0;
+
+ /* Write the end marker */
+ if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+ return -EFAULT;
+
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+ return sz;
+}
+
+static int restore_extcontext(void __user *buf)
+{
+ struct extcontext ext;
+ int err;
+
+ while (1) {
+ err = __get_user(ext.magic, (unsigned int *)buf);
+ if (err)
+ return err;
+
+ if (ext.magic == END_EXTCONTEXT_MAGIC)
+ return 0;
+
+ err = __get_user(ext.size, (unsigned int *)(buf
+ + offsetof(struct extcontext, size)));
+ if (err)
+ return err;
+
+ switch (ext.magic) {
+ case MSA_EXTCONTEXT_MAGIC:
+ err = restore_msa_extcontext(buf, ext.size);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ buf += ext.size;
+ }
+}
+
+/*
* Helper routines
*/
-static int protected_save_fp_context(struct sigcontext __user *sc)
+int protected_save_fp_context(void __user *sc)
{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used, ext_sz;
int err;
-#ifndef CONFIG_EVA
+
+ used = used_math() ? USED_FP : 0;
+ if (!used)
+ goto fp_done;
+
+ if (!test_thread_flag(TIF_32BIT_FPREGS))
+ used |= USED_FR1;
+ if (test_thread_flag(TIF_HYBRID_FPREGS))
+ used |= USED_HYBRID_FPRS;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so
+ * save to the kernel FP context & copy that to userland below.
+ */
+ if (config_enabled(CONFIG_EVA))
+ lose_fpu(1);
+
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
@@ -114,27 +328,57 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
if (likely(!err))
break;
/* touch the sigcontext and try again */
- err = __put_user(0, &sc->sc_fpregs[0]) |
- __put_user(0, &sc->sc_fpregs[31]) |
- __put_user(0, &sc->sc_fpc_csr);
+ err = __put_user(0, &fpregs[0]) |
+ __put_user(0, &fpregs[31]) |
+ __put_user(0, csr);
if (err)
- break; /* really bad sigcontext */
+ return err; /* really bad sigcontext */
}
-#else
- /*
- * EVA does not have FPU EVA instructions so saving fpu context directly
- * does not work.
- */
- lose_fpu(1);
- err = save_fp_context(sc); /* this might fail */
-#endif
- return err;
+
+fp_done:
+ ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+ if (err < 0)
+ return err;
+ used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+ return __put_user(used, used_math);
}
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+int protected_restore_fp_context(void __user *sc)
{
- int err, tmp __maybe_unused;
-#ifndef CONFIG_EVA
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used;
+ int err, sig = 0, tmp __maybe_unused;
+
+ err = __get_user(used, used_math);
+ conditional_used_math(used & USED_FP);
+
+ /*
+ * The signal handler may have used FPU; give it up if the program
+ * doesn't want it following sigreturn.
+ */
+ if (err || !(used & USED_FP))
+ lose_fpu(0);
+ if (err)
+ return err;
+ if (!(used & USED_FP))
+ goto fp_done;
+
+ err = sig = fpcsr_pending(csr);
+ if (err < 0)
+ return err;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so we
+ * disable the FPU here such that the code below simply copies to
+ * the kernel FP context.
+ */
+ if (config_enabled(CONFIG_EVA))
+ lose_fpu(0);
+
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
@@ -147,28 +391,24 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
if (likely(!err))
break;
/* touch the sigcontext and try again */
- err = __get_user(tmp, &sc->sc_fpregs[0]) |
- __get_user(tmp, &sc->sc_fpregs[31]) |
- __get_user(tmp, &sc->sc_fpc_csr);
+ err = __get_user(tmp, &fpregs[0]) |
+ __get_user(tmp, &fpregs[31]) |
+ __get_user(tmp, csr);
if (err)
break; /* really bad sigcontext */
}
-#else
- /*
- * EVA does not have FPU EVA instructions so restoring fpu context
- * directly does not work.
- */
- lose_fpu(0);
- err = restore_fp_context(sc); /* this might fail */
-#endif
- return err;
+
+fp_done:
+ if (used & USED_EXTCONTEXT)
+ err |= restore_extcontext(sc_to_extcontext(sc));
+
+ return err ?: sig;
}
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int i;
- unsigned int used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@@ -191,19 +431,38 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
- used_math = !!used_math();
- err |= __put_user(used_math, &sc->sc_used_math);
- if (used_math) {
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context(sc);
- }
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
return err;
}
+static size_t extcontext_max_size(void)
+{
+ size_t sz = 0;
+
+ /*
+ * The assumption here is that between this point & the point at which
+ * the extended context is saved the size of the context should only
+ * ever be able to shrink (if the task is preempted), but never grow.
+ * That is, what this function returns is an upper bound on the size of
+ * the extended context for the current task at the current time.
+ */
+
+ if (thread_msa_context_live())
+ sz += sizeof(struct msa_extcontext);
+
+ /* If any context is saved then we'll append the end marker */
+ if (sz)
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+
+ return sz;
+}
+
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
@@ -223,21 +482,8 @@ int fpcsr_pending(unsigned int __user *fpcsr)
return err ?: sig;
}
-static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
-{
- int err, sig;
-
- err = sig = fpcsr_pending(&sc->sc_fpc_csr);
- if (err > 0)
- err = 0;
- err |= protected_restore_fp_context(sc);
- return err ?: sig;
-}
-
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
- unsigned int used_math;
unsigned long treg;
int err = 0;
int i;
@@ -265,19 +511,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- if (used_math) {
- /* restore fpu context if we have used it before */
- if (!err)
- err = check_and_restore_fp_context(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu(0);
- }
-
- return err;
+ return err ?: protected_restore_fp_context(sc);
}
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
@@ -285,6 +519,9 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
{
unsigned long sp;
+ /* Leave space for potential extended context */
+ frame_size += extcontext_max_size();
+
/* Default to using normal stack */
sp = regs->regs[29];
@@ -520,7 +757,11 @@ struct mips_abi mips_abi = {
.setup_rt_frame = setup_rt_frame,
.rt_signal_return_offset =
offsetof(struct mips_vdso, rt_signal_trampoline),
- .restart = __NR_restart_syscall
+ .restart = __NR_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
};
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
@@ -616,6 +857,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
user_exit();
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -629,43 +873,46 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
}
#ifdef CONFIG_SMP
-#ifndef CONFIG_EVA
-static int smp_save_fp_context(struct sigcontext __user *sc)
+static int smp_save_fp_context(void __user *sc)
{
return raw_cpu_has_fpu
- ? _save_fp_context(sc)
+ ? save_hw_fp_context(sc)
: copy_fp_to_sigcontext(sc);
}
-static int smp_restore_fp_context(struct sigcontext __user *sc)
+static int smp_restore_fp_context(void __user *sc)
{
return raw_cpu_has_fpu
- ? _restore_fp_context(sc)
+ ? restore_hw_fp_context(sc)
: copy_fp_from_sigcontext(sc);
}
-#endif /* CONFIG_EVA */
#endif
static int signal_setup(void)
{
-#ifndef CONFIG_EVA
+ /*
+ * The offset from sigcontext to extended context should be the same
+ * regardless of the type of signal, such that userland can always know
+ * where to look if it wishes to find the extended context structures.
+ */
+ BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+ offsetof(struct sigframe, sf_sc)) !=
+ (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+ offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
#ifdef CONFIG_SMP
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
restore_fp_context = smp_restore_fp_context;
#else
if (cpu_has_fpu) {
- save_fp_context = _save_fp_context;
- restore_fp_context = _restore_fp_context;
+ save_fp_context = save_hw_fp_context;
+ restore_fp_context = restore_hw_fp_context;
} else {
save_fp_context = copy_fp_to_sigcontext;
restore_fp_context = copy_fp_from_sigcontext;
}
#endif /* CONFIG_SMP */
-#else
- save_fp_context = copy_fp_to_sigcontext;
- restore_fp_context = copy_fp_from_sigcontext;
-#endif
return 0;
}
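
save_extcontext() and restore_extcontext() above define a simple tagged-record protocol: each extension begins with a (magic, size) header and the stream terminates with END_EXTCONTEXT_MAGIC, so a reader can skip records it does not understand by their size. A hedged userland sketch of the same walk; the magic constants are assumed to match the uapi definitions this series adds:

    #include <stdint.h>
    #include <stdio.h>

    struct extcontext_hdr {
        uint32_t magic;
        uint32_t size;    /* size of the whole record, in bytes */
    };

    #define MSA_EXTCONTEXT_MAGIC 0x784d5341    /* "xMSA" (assumed) */
    #define END_EXTCONTEXT_MAGIC 0x78454e44    /* "xEND" (assumed) */

    static void walk_extcontext(const void *buf)
    {
        const struct extcontext_hdr *ext = buf;

        while (ext->magic != END_EXTCONTEXT_MAGIC) {
            printf("record %#x, %u bytes\n", ext->magic, ext->size);
            ext = (const struct extcontext_hdr *)
                    ((const char *)ext + ext->size);
        }
    }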
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 5d7f2634996f..f7e89524e316 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -36,12 +36,6 @@
#include "signal-common.h"
-static int (*save_fp_context32)(struct sigcontext32 __user *sc);
-static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
-extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
@@ -74,99 +68,11 @@ struct rt_sigframe32 {
struct ucontext32 rs_uc;
};
-/*
- * Thread saved context copy to/from a signal context presumed to be on the
- * user stack, and therefore accessed with appropriate macros from uaccess.h.
- */
-static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
-{
- int i;
- int err = 0;
- int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-
- for (i = 0; i < NUM_FPU_REGS; i += inc) {
- err |=
- __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
- &sc->sc_fpregs[i]);
- }
- err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-
-static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
-{
- int i;
- int err = 0;
- int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
- u64 fpr_val;
-
- for (i = 0; i < NUM_FPU_REGS; i += inc) {
- err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
- set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
- }
- err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-
-/*
- * sigcontext handlers
- */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
-{
- int err;
- while (1) {
- lock_fpu_owner();
- if (is_fpu_owner()) {
- err = save_fp_context32(sc);
- unlock_fpu_owner();
- } else {
- unlock_fpu_owner();
- err = copy_fp_to_sigcontext32(sc);
- }
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __put_user(0, &sc->sc_fpregs[0]) |
- __put_user(0, &sc->sc_fpregs[31]) |
- __put_user(0, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
-}
-
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
-{
- int err, tmp __maybe_unused;
- while (1) {
- lock_fpu_owner();
- if (is_fpu_owner()) {
- err = restore_fp_context32(sc);
- unlock_fpu_owner();
- } else {
- unlock_fpu_owner();
- err = copy_fp_from_sigcontext32(sc);
- }
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __get_user(tmp, &sc->sc_fpregs[0]) |
- __get_user(tmp, &sc->sc_fpregs[31]) |
- __get_user(tmp, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
-}
-
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
int err = 0;
int i;
- u32 used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@@ -186,35 +92,18 @@ static int setup_sigcontext32(struct pt_regs *regs,
err |= __put_user(mflo3(), &sc->sc_lo3);
}
- used_math = !!used_math();
- err |= __put_user(used_math, &sc->sc_used_math);
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
- if (used_math) {
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context32(sc);
- }
return err;
}
-static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
-{
- int err, sig;
-
- err = sig = fpcsr_pending(&sc->sc_fpc_csr);
- if (err > 0)
- err = 0;
- err |= protected_restore_fp_context32(sc);
- return err ?: sig;
-}
-
static int restore_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
- u32 used_math;
int err = 0;
s32 treg;
int i;
@@ -238,70 +127,7 @@ static int restore_sigcontext32(struct pt_regs *regs,
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- if (used_math) {
- /* restore fpu context if we have used it before */
- if (!err)
- err = check_and_restore_fp_context32(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu(0);
- }
-
- return err;
-}
-
-/*
- *
- */
-extern void __put_sigset_unknown_nsig(void);
-extern void __get_sigset_unknown_nsig(void);
-
-static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
-{
- int err = 0;
-
- if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
- return -EFAULT;
-
- switch (_NSIG_WORDS) {
- default:
- __put_sigset_unknown_nsig();
- case 2:
- err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
- err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
- case 1:
- err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
- err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
- }
-
- return err;
-}
-
-static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
-{
- int err = 0;
- unsigned long sig[4];
-
- if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
- return -EFAULT;
-
- switch (_NSIG_WORDS) {
- default:
- __get_sigset_unknown_nsig();
- case 2:
- err |= __get_user(sig[3], &ubuf->sig[3]);
- err |= __get_user(sig[2], &ubuf->sig[2]);
- kbuf->sig[1] = sig[2] | (sig[3] << 32);
- case 1:
- err |= __get_user(sig[1], &ubuf->sig[1]);
- err |= __get_user(sig[0], &ubuf->sig[0]);
- kbuf->sig[0] = sig[0] | (sig[1] << 32);
- }
-
- return err;
+ return err ?: protected_restore_fp_context(sc);
}
/*
@@ -585,20 +411,9 @@ struct mips_abi mips_abi_32 = {
.setup_rt_frame = setup_rt_frame_32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, o32_rt_signal_trampoline),
- .restart = __NR_O32_restart_syscall
-};
-
-static int signal32_init(void)
-{
- if (cpu_has_fpu) {
- save_fp_context32 = _save_fp_context32;
- restore_fp_context32 = _restore_fp_context32;
- } else {
- save_fp_context32 = copy_fp_to_sigcontext32;
- restore_fp_context32 = copy_fp_from_sigcontext32;
- }
+ .restart = __NR_O32_restart_syscall,
- return 0;
-}
-
-arch_initcall(signal32_init);
+ .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
+};
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index f1d4751eead0..0d017fdcaf07 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -153,5 +153,9 @@ struct mips_abi mips_abi_n32 = {
.setup_rt_frame = setup_rt_frame_n32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, n32_rt_signal_trampoline),
- .restart = __NR_N32_restart_syscall
+ .restart = __NR_N32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
};
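
The three off_sc_* offsets now carried by struct mips_abi (here and in the o32/n64 variants) are what let the single protected_{save,restore}_fp_context() implementation in signal.c address the o32, n32 and n64 sigcontext layouts alike: the shared code only ever forms pointers as base plus ABI-supplied offset. A reduced illustration of the pattern, with names invented for the sketch:

    #include <stddef.h>
    #include <stdint.h>

    /* One of several layouts differing only in field placement. */
    struct sigcontext_sketch {
        uint32_t pc;
        uint64_t fpregs[32];
        uint32_t csr;
    };

    struct abi_desc {
        size_t off_fpregs;
        size_t off_csr;
    };

    static const struct abi_desc abi_sketch = {
        .off_fpregs = offsetof(struct sigcontext_sketch, fpregs),
        .off_csr    = offsetof(struct sigcontext_sketch, csr),
    };

    /* Layout-agnostic accessor: the caller never names the struct. */
    static uint64_t *fpregs_of(void *sc, const struct abi_desc *abi)
    {
        return (uint64_t *)((char *)sc + abi->off_fpregs);
    }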
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index d1168d7c31e8..8489c88f9932 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -209,6 +209,7 @@ void spram_config(void)
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_QEMU_GENERIC:
+ case CPU_I6400:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index 5b539f5fc9d9..5f055393092d 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -21,24 +21,12 @@ static DEFINE_SPINLOCK(show_lock);
static void sysrq_tlbdump_single(void *dummy)
{
- const int field = 2 * sizeof(unsigned long);
unsigned long flags;
spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
- pr_info("Index : %0x\n", read_c0_index());
- pr_info("Pagemask: %0x\n", read_c0_pagemask());
- pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
- pr_info("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
- pr_info("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
- pr_info("Wired : %0x\n", read_c0_wired());
- pr_info("Pagegrain: %0x\n", read_c0_pagegrain());
- if (cpu_has_htw) {
- pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
- pr_info("PWSize : %0*lx\n", field, read_c0_pwsize());
- pr_info("PWCtl : %0x\n", read_c0_pwctl());
- }
+ dump_tlb_regs();
pr_info("\n");
dump_tlb_all();
pr_info("\n");
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8ea28e6ab37d..fdb392b27e81 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -370,11 +370,6 @@ void show_registers(struct pt_regs *regs)
set_fs(old_fs);
}
-static int regs_to_trapnr(struct pt_regs *regs)
-{
- return (regs->cp0_cause >> 2) & 0x1f;
-}
-
static DEFINE_RAW_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
@@ -384,7 +379,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
oops_enter();
- if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
SIGSEGV) == NOTIFY_STOP)
sig = 0;
@@ -470,7 +465,7 @@ asmlinkage void do_be(struct pt_regs *regs)
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
- if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
SIGBUS) == NOTIFY_STOP)
goto out;
@@ -826,7 +821,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
int sig;
prev_state = exception_enter();
- if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
SIGFPE) == NOTIFY_STOP)
goto out;
@@ -882,11 +877,12 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
- if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+ SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
- if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+ if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
SIGTRAP) == NOTIFY_STOP)
return;
@@ -948,6 +944,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
set_fs(KERNEL_DS);
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
u16 instr[2];
@@ -987,15 +984,27 @@ asmlinkage void do_bp(struct pt_regs *regs)
* pertain to them.
*/
switch (bcode) {
+ case BRK_UPROBE:
+ if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_XOL:
+ if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
case BRK_KPROBE_BP:
if (notify_die(DIE_BREAK, "debug", regs, bcode,
- regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
- regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
@@ -1028,6 +1037,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
set_fs(get_ds());
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
__get_user(instr[1], (u16 __user *)(epc + 2)))
@@ -1094,8 +1104,9 @@ asmlinkage void do_ri(struct pt_regs *regs)
no_r2_instr:
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
- if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
SIGILL) == NOTIFY_STOP)
goto out;
@@ -1444,8 +1455,9 @@ asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
enum ctx_state prev_state;
prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
- regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+ current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
goto out;
/* Clear MSACSR.Cause before enabling interrupts */
@@ -1523,7 +1535,6 @@ asmlinkage void do_watch(struct pt_regs *regs)
asmlinkage void do_mcheck(struct pt_regs *regs)
{
- const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
mm_segment_t old_fs = get_fs();
@@ -1532,19 +1543,8 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
show_regs(regs);
if (multi_match) {
- pr_err("Index : %0x\n", read_c0_index());
- pr_err("Pagemask: %0x\n", read_c0_pagemask());
- pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
- pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
- pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
- pr_err("Wired : %0x\n", read_c0_wired());
- pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
- if (cpu_has_htw) {
- pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
- pr_err("PWSize : %0*lx\n", field, read_c0_pwsize());
- pr_err("PWCtl : %0x\n", read_c0_pwctl());
- }
- pr_err("\n");
+ dump_tlb_regs();
+ pr_info("\n");
dump_tlb_all();
}
@@ -1651,6 +1651,7 @@ static inline void parity_protection_init(void)
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_QEMU_GENERIC:
+ case CPU_I6400:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
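
The traps.c changes above drop the regs_to_trapnr() helper in favour of eagerly recording the trap number into current->thread.trap_nr with the idiom (cause >> 2) & 0x1f, which extracts the ExcCode field held in bits [6:2] of the CP0 Cause register (the CAUSEB_EXCCODE/CAUSEF_EXCCODE definitions in asm/mipsregs.h describe the same field). As a standalone helper:

    #include <stdint.h>

    /* ExcCode occupies Cause[6:2]; 0x1f masks its five bits. */
    static inline unsigned int cause_exccode(uint32_t cause)
    {
        return (cause >> 2) & 0x1f;
    }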
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index eb3efd137fd1..990354dd6bde 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -891,6 +891,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
#ifdef CONFIG_EVA
mm_segment_t seg;
#endif
+ union fpureg *fpr;
+ enum msa_2b_fmt df;
+ unsigned int wd;
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
@@ -1202,6 +1205,75 @@ static void emulate_load_store_insn(struct pt_regs *regs,
break;
return;
+ case msa_op:
+ if (!cpu_has_msa)
+ goto sigill;
+
+ /*
+ * If we've reached this point then userland should have taken
+ * the MSA disabled exception & initialised vector context at
+ * some point in the past.
+ */
+ BUG_ON(!thread_msa_context_live());
+
+ df = insn.msa_mi10_format.df;
+ wd = insn.msa_mi10_format.wd;
+ fpr = &current->thread.fpu.fpr[wd];
+
+ switch (insn.msa_mi10_format.func) {
+ case msa_ld_op:
+ if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+ goto sigbus;
+
+ /*
+ * Disable preemption to avoid a race between copying
+ * state from userland, migrating to another CPU and
+ * updating the hardware vector register below.
+ */
+ preempt_disable();
+
+ res = __copy_from_user_inatomic(fpr, addr,
+ sizeof(*fpr));
+ if (res)
+ goto fault;
+
+ /*
+ * Update the hardware register if it is in use by the
+ * task in this quantum, in order to avoid having to
+ * save & restore the whole vector context.
+ */
+ if (test_thread_flag(TIF_USEDMSA))
+ write_msa_wr(wd, fpr, df);
+
+ preempt_enable();
+ break;
+
+ case msa_st_op:
+ if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
+ goto sigbus;
+
+ /*
+ * Update from the hardware register if it is in use by
+ * the task in this quantum, in order to avoid having to
+ * save & restore the whole vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA))
+ read_msa_wr(wd, fpr, df);
+ preempt_enable();
+
+ res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+ if (res)
+ goto fault;
+ break;
+
+ default:
+ goto sigbus;
+ }
+
+ compute_return_epc(regs);
+ break;
+
#ifndef CONFIG_CPU_MIPSR6
/*
* COP2 is available to implementor for application specific use.
@@ -2240,5 +2312,5 @@ static int __init debugfs_unaligned(void)
return -ENOMEM;
return 0;
}
-__initcall(debugfs_unaligned);
+arch_initcall(debugfs_unaligned);
#endif
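
Note that the MSA fixup above never retries the faulting wide access itself: it bounces through __copy_{from,to}_user_inatomic() into the task's saved union fpureg, which performs alignment-safe accesses. The same idea in plain userland C, assuming only that memcpy() compiles to byte-safe copies:

    #include <stdint.h>
    #include <string.h>

    /* Emulate an unaligned 64-bit load with a byte-wise copy; no
     * alignment trap regardless of how addr is aligned. */
    static uint64_t load_u64_unaligned(const void *addr)
    {
        uint64_t val;

        memcpy(&val, addr, sizeof(val));
        return val;
    }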
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
new file mode 100644
index 000000000000..8452d933a645
--- /dev/null
+++ b/arch/mips/kernel/uprobes.c
@@ -0,0 +1,341 @@
+#include <linux/highmem.h>
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+
+#include <asm/branch.h>
+#include <asm/cpu-features.h>
+#include <asm/ptrace.h>
+#include <asm/inst.h>
+
+static inline int insn_has_delay_slot(const union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ case jr_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op and bposge32_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltz_op:
+ case bltzl_op:
+ case bgez_op:
+ case bgezl_op:
+ case bltzal_op:
+ case bltzall_op:
+ case bgezal_op:
+ case bgezall_op:
+ case bposge32_op:
+ return 1;
+ }
+ break;
+
+ /*
+	 * j and jal are unconditional and in j_format; the rest decode
+	 * as i_format conditional branches. All of these have delay slots.
+ */
+ case jal_op:
+ case j_op:
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op: /* not really i_format */
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+ return 1;
+
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @aup: the probepoint information.
+ * @mm: the probed address space.
+ * @addr: virtual address at which to install the probepoint.
+ * Return 0 on success or a negative errno on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+ struct mm_struct *mm, unsigned long addr)
+{
+ union mips_instruction inst;
+
+ /*
+ * For the time being this also blocks attempts to use uprobes with
+ * MIPS16 and microMIPS.
+ */
+ if (addr & 0x03)
+ return -EINVAL;
+
+ inst.word = aup->insn[0];
+ aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
+ aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */
+
+ return 0;
+}
+
+/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ *
+ * This definition overrides the weak definition in kernel/events/uprobes.c
+ * and is needed for the case where an architecture has multiple trap
+ * instructions (like PowerPC or MIPS). We treat BREAK just like the more
+ * modern conditional trap instructions.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ union mips_instruction inst;
+
+ inst.word = *insn;
+
+ switch (inst.i_format.opcode) {
+ case spec_op:
+ switch (inst.r_format.func) {
+ case break_op:
+ case teq_op:
+ case tge_op:
+ case tgeu_op:
+ case tlt_op:
+ case tltu_op:
+ case tne_op:
+ return 1;
+ }
+ break;
+
+ case bcond_op: /* Yes, really ... */
+ switch (inst.u_format.rt) {
+ case teqi_op:
+ case tgei_op:
+ case tgeiu_op:
+ case tlti_op:
+ case tltiu_op:
+ case tnei_op:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#define UPROBE_TRAP_NR ULONG_MAX
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @aup: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+ union mips_instruction insn;
+
+ /*
+ * Now find the EPC where to resume after the breakpoint has been
+ * dealt with. This may require emulation of a branch.
+ */
+ aup->resume_epc = regs->cp0_epc + 4;
+ if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
+		insn.word = aup->insn[0];
+		__compute_return_epc_for_insn(regs, insn);
+ aup->resume_epc = regs->cp0_epc;
+ }
+
+ utask->autask.saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ regs->cp0_epc = current->utask->xol_vaddr;
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ regs->cp0_epc = aup->resume_epc;
+
+ return 0;
+}
+
+/*
+ * If the xol insn itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR (== ULONG_MAX) set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+ if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+
+ /* regs == NULL is a kernel bug */
+ if (WARN_ON(!regs))
+ return NOTIFY_DONE;
+
+ /* We are only interested in userspace traps */
+ if (!user_mode(regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (uprobe_pre_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_UPROBE_XOL:
+ if (uprobe_post_sstep_notifier(regs))
+ return NOTIFY_STOP;
+		break;
+	default:
+ break;
+ }
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * This function gets called when the XOL instruction either gets trapped or
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+ struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(
+ unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->regs[31];
+
+ /* Replace the return address with the trampoline address */
+	regs->regs[31] = trampoline_vaddr;
+
+ return ra;
+}
+
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This version overrides the weak version in kernel/events/uprobes.c.
+ * It is required to handle MIPS16 and microMIPS.
+ */
+int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long vaddr)
+{
+ return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+/**
+ * set_orig_insn - Restore the original instruction.
+ * @mm: the probed process address space.
+ * @auprobe: arch specific probepoint information.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, restore the original opcode (opcode) at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long vaddr)
+{
+ return uprobe_write_opcode(mm, vaddr,
+ *(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
+}
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ void *kaddr;
+
+ /* Initialize the slot */
+ kaddr = kmap_atomic(page);
+ memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+ kunmap_atomic(kaddr);
+
+ /*
+ * The MIPS version of flush_icache_range will operate safely on
+ * user space addresses and more importantly, it doesn't require a
+	 * user space addresses and, more importantly, it doesn't require a
+ */
+ flush_icache_range(vaddr, vaddr + len);
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ *
+ * For now we never emulate, so this function always returns false.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	return false;
+}
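
The XOL trap detection above boils down to parking a sentinel value in thread.trap_nr before single-stepping and checking afterwards whether anything overwrote it; every fault path records a real ExcCode, so a clean step leaves the sentinel intact. A self-contained sketch of that handshake, with structures invented for illustration:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define UPROBE_TRAP_NR ULONG_MAX    /* sentinel: no trap seen */

    struct thread_sketch {
        unsigned long trap_nr;
    };

    static void pre_xol(struct thread_sketch *t)
    {
        t->trap_nr = UPROBE_TRAP_NR;    /* arm the sentinel */
    }

    static bool xol_was_trapped(const struct thread_sketch *t)
    {
        return t->trap_nr != UPROBE_TRAP_NR;
    }

    int main(void)
    {
        struct thread_sketch t;

        pre_xol(&t);
        printf("clean step : %d\n", xol_was_trapped(&t));
        t.trap_nr = 4;    /* e.g. an address error exception */
        printf("after fault: %d\n", xol_was_trapped(&t));
        return 0;
    }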
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 11da314565cc..9067b651c7a2 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -817,6 +817,7 @@ static int vpe_open(struct inode *inode, struct file *filp)
static int vpe_release(struct inode *inode, struct file *filp)
{
+#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
struct vpe *v;
Elf_Ehdr *hdr;
int ret = 0;
@@ -827,7 +828,7 @@ static int vpe_release(struct inode *inode, struct file *filp)
hdr = (Elf_Ehdr *) v->pbuffer;
if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
- if ((vpe_elfload(v) >= 0) && vpe_run) {
+ if (vpe_elfload(v) >= 0) {
vpe_run(v);
} else {
pr_warn("VPE loader: ELF load failed.\n");
@@ -850,6 +851,10 @@ static int vpe_release(struct inode *inode, struct file *filp)
v->plen = 0;
return ret;
+#else
+ pr_warn("VPE loader: ELF load failed.\n");
+ return -ENOEXEC;
+#endif
}
static ssize_t vpe_write(struct file *file, const char __user *buffer,