Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                                              |   2
-rw-r--r--  arch/mips/bcm47xx/setup.c                                      |   2
-rw-r--r--  arch/mips/include/asm/checksum.h                               |  68
-rw-r--r--  arch/mips/include/asm/compat.h                                 |   2
-rw-r--r--  arch/mips/include/asm/cpu-type.h                               |   1
-rw-r--r--  arch/mips/include/asm/irqflags.h                               |   5
-rw-r--r--  arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h  |   2
-rw-r--r--  arch/mips/include/asm/mach-loongson64/irq.h                    |   2
-rw-r--r--  arch/mips/include/asm/mach-loongson64/mmzone.h                 |   1
-rw-r--r--  arch/mips/include/asm/unroll.h                                 |  64
-rw-r--r--  arch/mips/kernel/kprobes.c                                     |  54
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c                           |   4
-rw-r--r--  arch/mips/kernel/smp-bmips.c                                   |   2
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n32.tbl                      |  12
-rw-r--r--  arch/mips/kernel/syscalls/syscall_o32.tbl                      |  12
-rw-r--r--  arch/mips/kernel/traps.c                                       |  12
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S                                 |   1
-rw-r--r--  arch/mips/kvm/mips.c                                           |   2
-rw-r--r--  arch/mips/lib/csum_partial.S                                   | 261
-rw-r--r--  arch/mips/loongson2ef/Platform                                 |   4
-rw-r--r--  arch/mips/loongson64/cop2-ex.c                                 |  24
-rw-r--r--  arch/mips/mm/c-r4k.c                                           |   4
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c                           |   4
-rw-r--r--  arch/mips/sni/a20r.c                                           |  13
24 files changed, 203 insertions(+), 355 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5f88a8fc11fc..440614dc9de2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -86,6 +86,7 @@ config MIPS
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select PERF_USE_VMALLOC
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select RTC_LIB
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
@@ -877,6 +878,7 @@ config SNI_RM
select I8253
select I8259
select ISA
+ select MIPS_L1_CACHE_SHIFT_6
select SWAP_IO_SPACE if CPU_BIG_ENDIAN
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 82627c264964..01427bde2397 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -148,7 +148,7 @@ void __init plat_mem_setup(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
+ if (c->cputype == CPU_74K) {
pr_info("Using bcma bus\n");
#ifdef CONFIG_BCM47XX_BCMA
bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 181f7d14efb9..5f80c28f5253 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -34,42 +34,17 @@
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
-__wsum __csum_partial_copy_kernel(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-
-__wsum __csum_partial_copy_from_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-__wsum __csum_partial_copy_to_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *err_ptr)
-{
- might_fault();
- if (uaccess_kernel())
- return __csum_partial_copy_kernel((__force void *)src, dst,
- len, sum, err_ptr);
- else
- return __csum_partial_copy_from_user((__force void *)src, dst,
- len, sum, err_ptr);
-}
+__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_from_user(src, dst, len, sum,
- err_ptr);
- if (len)
- *err_ptr = -EFAULT;
-
- return sum;
+ might_fault();
+ if (!access_ok(src, len))
+ return 0;
+ return __csum_partial_copy_from_user(src, dst, len);
}
/*
@@ -77,33 +52,24 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
*/
#define HAVE_CSUM_COPY_USER
static inline
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
might_fault();
- if (access_ok(dst, len)) {
- if (uaccess_kernel())
- return __csum_partial_copy_kernel(src,
- (__force void *)dst,
- len, sum, err_ptr);
- else
- return __csum_partial_copy_to_user(src,
- (__force void *)dst,
- len, sum, err_ptr);
- }
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return __csum_partial_copy_to_user(src, dst, len);
}
/*
* the same as csum_partial, but copies from user space (but on MIPS
* we have just one address space, so this is identical to the above)
*/
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ return __csum_partial_copy_nocheck(src, dst, len);
+}
/*
* Fold a partial checksum without adding pseudo headers
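
The rework above drops the sum and err_ptr arguments: the running sum is now
seeded with -1 (see the csum_partial.S hunk below), so a fault can be reported
simply by returning 0, which can never be a valid result. A minimal caller
sketch under the new convention -- user_buf, kbuf and len are illustrative
names, not taken from this patch:

	/* copy len bytes from userspace while checksumming them */
	__wsum csum = csum_and_copy_from_user(user_buf, kbuf, len);

	if (!csum)			/* access_ok() failed or the copy faulted */
		return -EFAULT;

	return csum_fold(csum);		/* fold the 32-bit partial sum to 16 bits */
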
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 255afcdd79c9..65975712a22d 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -26,8 +26,6 @@ typedef s32 compat_caddr_t;
typedef struct {
s32 val[2];
} compat_fsid_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
struct compat_stat {
compat_dev_t st_dev;
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 75a7a382da09..3288cef4b168 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_34K:
case CPU_1004K:
case CPU_74K:
+ case CPU_1074K:
case CPU_M14KC:
case CPU_M14KEC:
case CPU_INTERAPTIV:
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 47a8ffc0b413..f5b8300f4573 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -137,6 +137,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return !(flags & 1);
}
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
#endif /* #ifndef __ASSEMBLY__ */
/*
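
The new arch_irqs_disabled() helper just combines arch_local_save_flags() with
arch_irqs_disabled_flags(), letting generic code ask whether interrupts are
currently masked without threading a flags value through. A hedged usage
sketch (the assertion site is hypothetical, not from this patch):

	/* e.g. in a path that must run with interrupts masked */
	WARN_ON_ONCE(!arch_irqs_disabled());
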
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index b6e9c99b85a5..eb181224eb4c 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -26,7 +26,6 @@
#define cpu_has_counter 1
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_divec 0
-#define cpu_has_ejtag 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
#define cpu_has_mcheck 0
@@ -42,7 +41,6 @@
#define cpu_has_veic 0
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
-#define cpu_has_watch 1
#define cpu_has_wsbh 1
#define cpu_has_ic_fills_f_dc 1
#define cpu_hwrena_impl_bits 0xc0000000
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index f5e362f79701..bf2480923154 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -2,8 +2,6 @@
#ifndef __ASM_MACH_LOONGSON64_IRQ_H_
#define __ASM_MACH_LOONGSON64_IRQ_H_
-#include <boot_param.h>
-
/* cpu core interrupt numbers */
#define NR_IRQS_LEGACY 16
#define NR_MIPS_CPU_IRQS 8
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 3a25dbd3b3e9..5eaca4fe3f92 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -9,7 +9,6 @@
#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
#define _ASM_MACH_LOONGSON64_MMZONE_H
-#include <boot_param.h>
#define NODE_ADDRSPACE_SHIFT 44
#define NODE0_ADDRSPACE_OFFSET 0x000000000000UL
#define NODE1_ADDRSPACE_OFFSET 0x100000000000UL
diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h
index 7dd4a80e05d6..6f4ac854b12d 100644
--- a/arch/mips/include/asm/unroll.h
+++ b/arch/mips/include/asm/unroll.h
@@ -28,38 +28,38 @@
BUILD_BUG_ON(!__builtin_constant_p(times)); \
\
switch (times) { \
- case 32: fn(__VA_ARGS__); /* fall through */ \
- case 31: fn(__VA_ARGS__); /* fall through */ \
- case 30: fn(__VA_ARGS__); /* fall through */ \
- case 29: fn(__VA_ARGS__); /* fall through */ \
- case 28: fn(__VA_ARGS__); /* fall through */ \
- case 27: fn(__VA_ARGS__); /* fall through */ \
- case 26: fn(__VA_ARGS__); /* fall through */ \
- case 25: fn(__VA_ARGS__); /* fall through */ \
- case 24: fn(__VA_ARGS__); /* fall through */ \
- case 23: fn(__VA_ARGS__); /* fall through */ \
- case 22: fn(__VA_ARGS__); /* fall through */ \
- case 21: fn(__VA_ARGS__); /* fall through */ \
- case 20: fn(__VA_ARGS__); /* fall through */ \
- case 19: fn(__VA_ARGS__); /* fall through */ \
- case 18: fn(__VA_ARGS__); /* fall through */ \
- case 17: fn(__VA_ARGS__); /* fall through */ \
- case 16: fn(__VA_ARGS__); /* fall through */ \
- case 15: fn(__VA_ARGS__); /* fall through */ \
- case 14: fn(__VA_ARGS__); /* fall through */ \
- case 13: fn(__VA_ARGS__); /* fall through */ \
- case 12: fn(__VA_ARGS__); /* fall through */ \
- case 11: fn(__VA_ARGS__); /* fall through */ \
- case 10: fn(__VA_ARGS__); /* fall through */ \
- case 9: fn(__VA_ARGS__); /* fall through */ \
- case 8: fn(__VA_ARGS__); /* fall through */ \
- case 7: fn(__VA_ARGS__); /* fall through */ \
- case 6: fn(__VA_ARGS__); /* fall through */ \
- case 5: fn(__VA_ARGS__); /* fall through */ \
- case 4: fn(__VA_ARGS__); /* fall through */ \
- case 3: fn(__VA_ARGS__); /* fall through */ \
- case 2: fn(__VA_ARGS__); /* fall through */ \
- case 1: fn(__VA_ARGS__); /* fall through */ \
+ case 32: fn(__VA_ARGS__); fallthrough; \
+ case 31: fn(__VA_ARGS__); fallthrough; \
+ case 30: fn(__VA_ARGS__); fallthrough; \
+ case 29: fn(__VA_ARGS__); fallthrough; \
+ case 28: fn(__VA_ARGS__); fallthrough; \
+ case 27: fn(__VA_ARGS__); fallthrough; \
+ case 26: fn(__VA_ARGS__); fallthrough; \
+ case 25: fn(__VA_ARGS__); fallthrough; \
+ case 24: fn(__VA_ARGS__); fallthrough; \
+ case 23: fn(__VA_ARGS__); fallthrough; \
+ case 22: fn(__VA_ARGS__); fallthrough; \
+ case 21: fn(__VA_ARGS__); fallthrough; \
+ case 20: fn(__VA_ARGS__); fallthrough; \
+ case 19: fn(__VA_ARGS__); fallthrough; \
+ case 18: fn(__VA_ARGS__); fallthrough; \
+ case 17: fn(__VA_ARGS__); fallthrough; \
+ case 16: fn(__VA_ARGS__); fallthrough; \
+ case 15: fn(__VA_ARGS__); fallthrough; \
+ case 14: fn(__VA_ARGS__); fallthrough; \
+ case 13: fn(__VA_ARGS__); fallthrough; \
+ case 12: fn(__VA_ARGS__); fallthrough; \
+ case 11: fn(__VA_ARGS__); fallthrough; \
+ case 10: fn(__VA_ARGS__); fallthrough; \
+ case 9: fn(__VA_ARGS__); fallthrough; \
+ case 8: fn(__VA_ARGS__); fallthrough; \
+ case 7: fn(__VA_ARGS__); fallthrough; \
+ case 6: fn(__VA_ARGS__); fallthrough; \
+ case 5: fn(__VA_ARGS__); fallthrough; \
+ case 4: fn(__VA_ARGS__); fallthrough; \
+ case 3: fn(__VA_ARGS__); fallthrough; \
+ case 2: fn(__VA_ARGS__); fallthrough; \
+ case 1: fn(__VA_ARGS__); fallthrough; \
case 0: break; \
\
default: \
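
The /* fall through */ comments become the fallthrough pseudo-keyword. In
kernels of this vintage it comes from <linux/compiler_attributes.h> and
expands roughly as below (a sketch of the definition, not a verbatim copy):

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif

Unlike the comment form, the attribute survives macro expansion, which is why
the conversion matters inside a macro like this one.
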
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index d043c2f897fc..54dfba8fa77c 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -477,6 +477,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->regs[31] = (unsigned long)kretprobe_trampoline;
@@ -488,57 +489,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
- instruction_pointer(regs) = orig_ret_address;
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ instruction_pointer(regs) = __kretprobe_trampoline_handler(regs,
+ kretprobe_trampoline, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
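
The hand-rolled instance walk is replaced by the generic kretprobe trampoline
handler. Its declaration, as I understand the kprobes API at this point, is
roughly:

	unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
						     void *trampoline_address,
						     void *frame_pointer);

It returns the original return address to resume at. Passing NULL as
frame_pointer (and setting ri->fp = NULL in arch_prepare_kretprobe(), as in
the hunk above) tells the generic code not to use frame-pointer matching when
picking the kretprobe_instance.
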
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index efce5defcc5c..011eb6bbf81a 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1898,8 +1898,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
(base_id >= 64 && base_id < 90) ||
(base_id >= 128 && base_id < 164) ||
(base_id >= 192 && base_id < 200) ||
- (base_id >= 256 && base_id < 274) ||
- (base_id >= 320 && base_id < 358) ||
+ (base_id >= 256 && base_id < 275) ||
+ (base_id >= 320 && base_id < 361) ||
(base_id >= 384 && base_id < 574))
break;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 2f513506a3d5..1dbfb5aadffd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -239,6 +239,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
+ bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index f9df9edb67a4..cf72a0206a87 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -25,8 +25,8 @@
15 n32 ioctl compat_sys_ioctl
16 n32 pread64 sys_pread64
17 n32 pwrite64 sys_pwrite64
-18 n32 readv compat_sys_readv
-19 n32 writev compat_sys_writev
+18 n32 readv sys_readv
+19 n32 writev sys_writev
20 n32 access sys_access
21 n32 pipe sysm_pipe
22 n32 _newselect compat_sys_select
@@ -167,7 +167,7 @@
157 n32 sync sys_sync
158 n32 acct sys_acct
159 n32 settimeofday compat_sys_settimeofday
-160 n32 mount compat_sys_mount
+160 n32 mount sys_mount
161 n32 umount2 sys_umount
162 n32 swapon sys_swapon
163 n32 swapoff sys_swapoff
@@ -278,7 +278,7 @@
267 n32 splice sys_splice
268 n32 sync_file_range sys_sync_file_range
269 n32 tee sys_tee
-270 n32 vmsplice compat_sys_vmsplice
+270 n32 vmsplice sys_vmsplice
271 n32 move_pages compat_sys_move_pages
272 n32 set_robust_list compat_sys_set_robust_list
273 n32 get_robust_list compat_sys_get_robust_list
@@ -317,8 +317,8 @@
306 n32 syncfs sys_syncfs
307 n32 sendmmsg compat_sys_sendmmsg
308 n32 setns sys_setns
-309 n32 process_vm_readv compat_sys_process_vm_readv
-310 n32 process_vm_writev compat_sys_process_vm_writev
+309 n32 process_vm_readv sys_process_vm_readv
+310 n32 process_vm_writev sys_process_vm_writev
311 n32 kcmp sys_kcmp
312 n32 finit_module sys_finit_module
313 n32 sched_setattr sys_sched_setattr
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 195b43cf27c8..a17aab5abeb2 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -29,7 +29,7 @@
18 o32 unused18 sys_ni_syscall
19 o32 lseek sys_lseek
20 o32 getpid sys_getpid
-21 o32 mount sys_mount compat_sys_mount
+21 o32 mount sys_mount
22 o32 umount sys_oldumount
23 o32 setuid sys_setuid
24 o32 getuid sys_getuid
@@ -156,8 +156,8 @@
142 o32 _newselect sys_select compat_sys_select
143 o32 flock sys_flock
144 o32 msync sys_msync
-145 o32 readv sys_readv compat_sys_readv
-146 o32 writev sys_writev compat_sys_writev
+145 o32 readv sys_readv
+146 o32 writev sys_writev
147 o32 cacheflush sys_cacheflush
148 o32 cachectl sys_cachectl
149 o32 sysmips __sys_sysmips
@@ -318,7 +318,7 @@
304 o32 splice sys_splice
305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
306 o32 tee sys_tee
-307 o32 vmsplice sys_vmsplice compat_sys_vmsplice
+307 o32 vmsplice sys_vmsplice
308 o32 move_pages sys_move_pages compat_sys_move_pages
309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list
310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list
@@ -356,8 +356,8 @@
342 o32 syncfs sys_syncfs
343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg
344 o32 setns sys_setns
-345 o32 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-346 o32 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+345 o32 process_vm_readv sys_process_vm_readv
+346 o32 process_vm_writev sys_process_vm_writev
347 o32 kcmp sys_kcmp
348 o32 finit_module sys_finit_module
349 o32 sched_setattr sys_sched_setattr
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 38aa07ccdbcc..cf788591f091 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1287,6 +1287,18 @@ static int enable_restore_fp_context(int msa)
err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
+ /*
+ * With MSA enabled, userspace can see MSACSR and the MSA
+ * registers, but their values are stale, left over from the
+ * previously running task; restore them from the saved
+ * FP/MSA context.
+ */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+ * own_fpu_inatomic(1) restores only the low 64 bits;
+ * fix up the high 64 bits here.
+ */
+ init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index f185a85a27c1..5e97e9d02f98 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -202,6 +202,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
/* These must appear regardless of . */
.gptab.sdata : {
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7de85d2253ff..0c50ac444222 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 87fda0713b84..a46db0807195 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -308,8 +308,8 @@ EXPORT_SYMBOL(csum_partial)
/*
* checksum and copy routines based on memcpy.S
*
- * csum_partial_copy_nocheck(src, dst, len, sum)
- * __csum_partial_copy_kernel(src, dst, len, sum, errp)
+ * csum_partial_copy_nocheck(src, dst, len)
+ * __csum_partial_copy_kernel(src, dst, len)
*
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
@@ -318,26 +318,11 @@ EXPORT_SYMBOL(csum_partial)
#define src a0
#define dst a1
#define len a2
-#define psum a3
#define sum v0
#define odd t8
-#define errptr t9
/*
- * The exception handler for loads requires that:
- * 1- AT contain the address of the byte just past the end of the source
- * of the copy,
- * 2- src_entry <= src < AT, and
- * 3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- * not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
*/
/* Instruction type */
@@ -358,11 +343,11 @@ EXPORT_SYMBOL(csum_partial)
* addr : Address
* handler : Exception handler
*/
-#define EXC(insn, type, reg, addr, handler) \
+#define EXC(insn, type, reg, addr) \
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
/* This is enabled in EVA mode */ \
.else; \
@@ -371,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR 9b, .L_exc; \
.previous; \
.else; \
/* EVA without exception */ \
@@ -384,14 +369,14 @@ EXPORT_SYMBOL(csum_partial)
#ifdef USE_DOUBLE
#define LOADK ld /* No exception */
-#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@@ -404,14 +389,14 @@ EXPORT_SYMBOL(csum_partial)
#else
#define LOADK lw /* No exception */
-#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr)
#define ADD addu
#define SUB subu
#define SRL srl
@@ -450,22 +435,9 @@ EXPORT_SYMBOL(csum_partial)
.set at=v1
#endif
- .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+ .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
- PTR_ADDU AT, src, len /* See (1) above. */
- /* initialize __nocheck if this the first time we execute this
- * macro
- */
-#ifdef CONFIG_64BIT
- move errptr, a4
-#else
- lw errptr, 16(sp)
-#endif
- .if \__nocheck == 1
- FEXPORT(csum_partial_copy_nocheck)
- EXPORT_SYMBOL(csum_partial_copy_nocheck)
- .endif
- move sum, zero
+ li sum, -1
move odd, zero
/*
* Note: dst & src may be unaligned, len may be 0
@@ -497,31 +469,31 @@ EXPORT_SYMBOL(csum_partial)
SUB len, 8*NBYTES # subtract here for bgez loop
.align 4
1:
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
- LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
- LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
- LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
- LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
+ LOAD(t4, UNIT(4)(src))
+ LOAD(t5, UNIT(5)(src))
+ LOAD(t6, UNIT(6)(src))
+ LOAD(t7, UNIT(7)(src))
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
- STORE(t4, UNIT(4)(dst), .Ls_exc\@)
+ STORE(t4, UNIT(4)(dst))
ADDC(t4, t5)
- STORE(t5, UNIT(5)(dst), .Ls_exc\@)
+ STORE(t5, UNIT(5)(dst))
ADDC(sum, t4)
- STORE(t6, UNIT(6)(dst), .Ls_exc\@)
+ STORE(t6, UNIT(6)(dst))
ADDC(t6, t7)
- STORE(t7, UNIT(7)(dst), .Ls_exc\@)
+ STORE(t7, UNIT(7)(dst))
ADDC(sum, t6)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
@@ -541,19 +513,19 @@ EXPORT_SYMBOL(csum_partial)
/*
* len >= 4*NBYTES
*/
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -566,10 +538,10 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -592,10 +564,10 @@ EXPORT_SYMBOL(csum_partial)
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
- STREST(t0, -1(t1), .Ls_exc\@)
+ STREST(t0, -1(t1))
SHIFT_DISCARD_REVERT t0, t0, bits
.set reorder
ADDC(sum, t0)
@@ -612,12 +584,12 @@ EXPORT_SYMBOL(csum_partial)
* Set match = (src and dst have same alignment)
*/
#define match rem
- LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t3, FIRST(0)(src))
ADD t2, zero, NBYTES
- LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(0)(src))
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
- STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+ STFIRST(t3, FIRST(0)(dst))
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@@ -639,26 +611,26 @@ EXPORT_SYMBOL(csum_partial)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDFIRST(t1, FIRST(1)(src))
SUB len, len, 4*NBYTES
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
- LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
- LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
- LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
- LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
- LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+ LDREST(t0, REST(0)(src))
+ LDREST(t1, REST(1)(src))
+ LDFIRST(t2, FIRST(2)(src))
+ LDFIRST(t3, FIRST(3)(src))
+ LDREST(t2, REST(2)(src))
+ LDREST(t3, REST(3)(src))
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -671,11 +643,11 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDREST(t0, REST(0)(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -696,11 +668,10 @@ EXPORT_SYMBOL(csum_partial)
#endif
move t2, zero # partial word
li t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
- LOADBU(t0, N(src), .Ll_exc_copy\@); \
+ LOADBU(t0, N(src)); \
SUB len, len, 1; \
- STOREB(t0, N(dst), .Ls_exc\@); \
+ STOREB(t0, N(dst)); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
beqz len, .Lcopy_bytes_done\@; \
@@ -714,9 +685,9 @@ EXPORT_SYMBOL(csum_partial)
COPY_BYTE(4)
COPY_BYTE(5)
#endif
- LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+ LOADBU(t0, NBYTES-2(src))
SUB len, len, 1
- STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+ STOREB(t0, NBYTES-2(dst))
SLLV t0, t0, t3
or t2, t0
.Lcopy_bytes_done\@:
@@ -753,97 +724,31 @@ EXPORT_SYMBOL(csum_partial)
#endif
.set pop
.set reorder
- ADDC32(sum, psum)
jr ra
.set noreorder
+ .endm
-.Ll_exc_copy\@:
- /*
- * Copy bytes from src until faulting load address (or until a
- * lb faults)
- *
- * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
- * may be more than a byte beyond the last address.
- * Hence, the lb below may get an exception.
- *
- * Assumes src < THREAD_BUADDR($28)
- */
- LOADK t0, TI_TASK($28)
- li t2, SHIFT_START
- LOADK t0, THREAD_BUADDR(t0)
-1:
- LOADBU(t1, 0(src), .Ll_exc\@)
- ADD src, src, 1
- sb t1, 0(dst) # can't fault -- we're copy_from_user
- SLLV t1, t1, t2
- addu t2, SHIFT_INC
- ADDC(sum, t1)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 1
- bne src, t0, 1b
- .set noreorder
-.Ll_exc\@:
- LOADK t0, TI_TASK($28)
- nop
- LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
- nop
- SUB len, AT, t0 # len number of uncopied bytes
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- .set push
- .set noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
-#endif
- li v1, -EFAULT
- b .Ldone\@
- sw v1, (errptr)
-
-.Ls_exc\@:
- li v0, -1 /* invalid checksum */
- li v1, -EFAULT
+ .set noreorder
+.L_exc:
jr ra
- sw v1, (errptr)
- .set pop
- .endm
+ li v0, 0
-LEAF(__csum_partial_copy_kernel)
-EXPORT_SYMBOL(__csum_partial_copy_kernel)
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
#ifndef CONFIG_EVA
FEXPORT(__csum_partial_copy_to_user)
EXPORT_SYMBOL(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
EXPORT_SYMBOL(__csum_partial_copy_from_user)
#endif
-__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
-END(__csum_partial_copy_kernel)
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
#ifdef CONFIG_EVA
LEAF(__csum_partial_copy_to_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
END(__csum_partial_copy_from_user)
#endif
diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
index 4ab55f1123a0..ae023b9a1c51 100644
--- a/arch/mips/loongson2ef/Platform
+++ b/arch/mips/loongson2ef/Platform
@@ -44,6 +44,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
endif
endif
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
#
# Loongson Machines' Support
#
diff --git a/arch/mips/loongson64/cop2-ex.c b/arch/mips/loongson64/cop2-ex.c
index f130f62129b8..00055d4b6042 100644
--- a/arch/mips/loongson64/cop2-ex.c
+++ b/arch/mips/loongson64/cop2-ex.c
@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rt, value);
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rq, value_next);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
compute_return_epc(regs);
own_fpu(1);
}
@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value_next = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rq);
+ value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
StoreDW(addr + 8, value_next, res);
if (res)
goto fault;
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
break;
@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreW(addr, value, res);
if (res)
@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
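
The set_fpr64()/get_fpr64() accessors operate on a single union fpureg plus a
64-bit word index, roughly as below (a sketch of the assumed signatures, not
copied from this patch):

	void set_fpr64(union fpureg *fpr, unsigned int idx, u64 val);
	u64  get_fpr64(union fpureg *fpr, unsigned int idx);

so the fixed call sites index the fpr[] array by register number and pass word
index 0, instead of passing the whole array with the register number as the
index.
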
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fc5a6d25f74f..0ef717093262 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1712,7 +1712,11 @@ static void setup_scache(void)
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1493c49ca47a..55d7b7fd18b6 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -245,7 +245,6 @@ static int mipsxx_perfcount_handler(void)
switch (counters) {
#define HANDLE_COUNTER(n) \
- fallthrough; \
case n + 1: \
control = r_c0_perfctrl ## n(); \
counter = r_c0_perfcntr ## n(); \
@@ -256,8 +255,11 @@ static int mipsxx_perfcount_handler(void)
handled = IRQ_HANDLED; \
}
HANDLE_COUNTER(3)
+ fallthrough;
HANDLE_COUNTER(2)
+ fallthrough;
HANDLE_COUNTER(1)
+ fallthrough;
HANDLE_COUNTER(0)
}
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index 0ecffb65fd6d..eeeec18c420a 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
},
};
-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
{
u32 status = read_c0_status();
@@ -205,12 +208,14 @@ static void a20r_hwint(void)
int irq;
clear_c0_status(IE_IRQ0);
- status = a20r_ack_hwint();
+ status = a20r_update_cause_ip();
cause = read_c0_cause();
irq = ffs(((cause & status) >> 8) & 0xf8);
if (likely(irq > 0))
do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
+
+ a20r_update_cause_ip();
set_c0_status(IE_IRQ0);
}
@@ -222,8 +227,8 @@ void __init sni_a20r_irq_init(void)
irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
sni_hwint = a20r_hwint;
change_c0_status(ST0_IM, IE_IRQ0);
- if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA",
- NULL))
+ if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler,
+ IRQF_SHARED, "ISA", sni_isa_irq_handler))
pr_err("Failed to register ISA interrupt\n");
}
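
request_irq() rejects IRQF_SHARED registrations that pass a NULL dev_id, since
free_irq() needs dev_id to identify which handler on the shared line to
remove; reusing the handler pointer as dev_id, as above, is a common idiom
when there is no per-device structure. A generic sketch of the pattern (irq
number and names are illustrative):

	if (request_irq(irq, my_handler, IRQF_SHARED, "my-dev", my_handler))
		pr_err("Failed to register shared interrupt\n");
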