Diffstat (limited to 'arch/mips/lib')
-rw-r--r--  arch/mips/lib/Makefile       |   1
-rw-r--r--  arch/mips/lib/bitops.c       |  14
-rw-r--r--  arch/mips/lib/bswapdi.c      |  14
-rw-r--r--  arch/mips/lib/bswapsi.c      |  10
-rw-r--r--  arch/mips/lib/csum_partial.S | 269
-rw-r--r--  arch/mips/lib/delay.c        |   5
-rw-r--r--  arch/mips/lib/dump_tlb.c     |  11
-rw-r--r--  arch/mips/lib/iomap-pci.c    |   2
-rw-r--r--  arch/mips/lib/memcpy.S       |  52
-rw-r--r--  arch/mips/lib/memset.S       |  23
-rw-r--r--  arch/mips/lib/mips-atomic.c  |  16
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c |   5
-rw-r--r--  arch/mips/lib/strncpy_user.S |  52
-rw-r--r--  arch/mips/lib/strnlen_user.S |  48
-rw-r--r--  arch/mips/lib/uncached.c     |   4
15 files changed, 194 insertions(+), 332 deletions(-)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 479f50559c83..5d5b993cbc2b 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -13,7 +13,6 @@ lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
-obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
# libgcc-style stuff needed in the kernel
obj-y += bswapsi.o bswapdi.o multi3.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 116d0bd8b2ae..00aee98e9d54 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -146,3 +146,17 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
return res;
}
EXPORT_SYMBOL(__mips_test_and_change_bit);
+
+bool __mips_xor_is_negative_byte(unsigned long mask,
+ volatile unsigned long *addr)
+{
+ unsigned long flags;
+ unsigned long data;
+
+ raw_local_irq_save(flags);
+ data = *addr;
+ *addr = data ^ mask;
+ raw_local_irq_restore(flags);
+
+ return (data & BIT(7)) != 0;
+}
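
A note on the new helper: judging by its shape, __mips_xor_is_negative_byte is the IRQ-masking fallback for the generic xor_unlock_is_negative_byte() bit operation (that pairing is an assumption; the caller is not part of this diff). It applies *addr ^= mask with interrupts disabled and reports whether bit 7 of the old value was set. A minimal usage sketch in C:

	/* Sketch only: values are illustrative. */
	unsigned long word = BIT(7) | BIT(0);	/* "waiters" + "locked" */
	bool waiters = __mips_xor_is_negative_byte(BIT(0), &word);
	/* word is now BIT(7): the xor cleared bit 0, and
	 * waiters == true because BIT(7) was set in the old value. */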
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index fcef74084492..88242dc7de17 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,17 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned long long __bswapdi2(unsigned long long u);
unsigned long long notrace __bswapdi2(unsigned long long u)
{
- return (((u) & 0xff00000000000000ull) >> 56) |
- (((u) & 0x00ff000000000000ull) >> 40) |
- (((u) & 0x0000ff0000000000ull) >> 24) |
- (((u) & 0x000000ff00000000ull) >> 8) |
- (((u) & 0x00000000ff000000ull) << 8) |
- (((u) & 0x0000000000ff0000ull) << 24) |
- (((u) & 0x000000000000ff00ull) << 40) |
- (((u) & 0x00000000000000ffull) << 56);
+ return ___constant_swab64(u);
}
-
EXPORT_SYMBOL(__bswapdi2);
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 22d8e4f6d66e..2ed655497de5 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned int __bswapsi2(unsigned int u);
unsigned int notrace __bswapsi2(unsigned int u)
{
- return (((u) & 0xff000000) >> 24) |
- (((u) & 0x00ff0000) >> 8) |
- (((u) & 0x0000ff00) << 8) |
- (((u) & 0x000000ff) << 24);
+ return ___constant_swab32(u);
}
-
EXPORT_SYMBOL(__bswapsi2);
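
For reference, ___constant_swab32() in include/uapi/linux/swab.h expands to the same mask-and-shift expression the open-coded body above used (___constant_swab64() likewise matches the deleted bswapdi.c version), so both conversions are behaviour-preserving; only the duplication goes away:

	#define ___constant_swab32(x) ((__u32)(			\
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) |	\
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |	\
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |	\
		(((__u32)(x) & (__u32)0xff000000UL) >> 24)))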
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index fda7b57b826e..3d2ff4118d79 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -11,9 +11,9 @@
* Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#ifdef CONFIG_64BIT
@@ -279,7 +279,8 @@ EXPORT_SYMBOL(csum_partial)
#endif
/* odd buffer alignment? */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON64)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_LOONGSON64)
.set push
.set arch=mips32r2
wsbh v1, sum
@@ -307,8 +308,8 @@ EXPORT_SYMBOL(csum_partial)
/*
* checksum and copy routines based on memcpy.S
*
- * csum_partial_copy_nocheck(src, dst, len, sum)
- * __csum_partial_copy_kernel(src, dst, len, sum, errp)
+ * csum_partial_copy_nocheck(src, dst, len)
+ * __csum_partial_copy_kernel(src, dst, len)
*
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* functions in this file use the standard calling convention.
@@ -317,26 +318,11 @@ EXPORT_SYMBOL(csum_partial)
#define src a0
#define dst a1
#define len a2
-#define psum a3
#define sum v0
#define odd t8
-#define errptr t9
/*
- * The exception handler for loads requires that:
- * 1- AT contain the address of the byte just past the end of the source
- * of the copy,
- * 2- src_entry <= src < AT, and
- * 3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- * not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
*/
/* Instruction type */
@@ -357,11 +343,11 @@ EXPORT_SYMBOL(csum_partial)
* addr : Address
* handler : Exception handler
*/
-#define EXC(insn, type, reg, addr, handler) \
+#define EXC(insn, type, reg, addr) \
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, .L_exc; \
.previous; \
/* This is enabled in EVA mode */ \
.else; \
@@ -370,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, .L_exc; \
.previous; \
.else; \
/* EVA without exception */ \
@@ -383,14 +369,14 @@ EXPORT_SYMBOL(csum_partial)
#ifdef USE_DOUBLE
#define LOADK ld /* No exception */
-#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@@ -403,14 +389,14 @@ EXPORT_SYMBOL(csum_partial)
#else
#define LOADK lw /* No exception */
-#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr)
#define ADD addu
#define SUB subu
#define SRL srl
@@ -449,22 +435,9 @@ EXPORT_SYMBOL(csum_partial)
.set at=v1
#endif
- .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+ .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
- PTR_ADDU AT, src, len /* See (1) above. */
- /* initialize __nocheck if this the first time we execute this
- * macro
- */
-#ifdef CONFIG_64BIT
- move errptr, a4
-#else
- lw errptr, 16(sp)
-#endif
- .if \__nocheck == 1
- FEXPORT(csum_partial_copy_nocheck)
- EXPORT_SYMBOL(csum_partial_copy_nocheck)
- .endif
- move sum, zero
+ li sum, -1
move odd, zero
/*
* Note: dst & src may be unaligned, len may be 0
@@ -496,31 +469,31 @@ EXPORT_SYMBOL(csum_partial)
SUB len, 8*NBYTES # subtract here for bgez loop
.align 4
1:
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
- LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
- LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
- LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
- LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
+ LOAD(t4, UNIT(4)(src))
+ LOAD(t5, UNIT(5)(src))
+ LOAD(t6, UNIT(6)(src))
+ LOAD(t7, UNIT(7)(src))
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
- STORE(t4, UNIT(4)(dst), .Ls_exc\@)
+ STORE(t4, UNIT(4)(dst))
ADDC(t4, t5)
- STORE(t5, UNIT(5)(dst), .Ls_exc\@)
+ STORE(t5, UNIT(5)(dst))
ADDC(sum, t4)
- STORE(t6, UNIT(6)(dst), .Ls_exc\@)
+ STORE(t6, UNIT(6)(dst))
ADDC(t6, t7)
- STORE(t7, UNIT(7)(dst), .Ls_exc\@)
+ STORE(t7, UNIT(7)(dst))
ADDC(sum, t6)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
@@ -540,19 +513,19 @@ EXPORT_SYMBOL(csum_partial)
/*
* len >= 4*NBYTES
*/
- LOAD(t0, UNIT(0)(src), .Ll_exc\@)
- LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
- LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
- LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -565,10 +538,10 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -591,10 +564,10 @@ EXPORT_SYMBOL(csum_partial)
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
- LOAD(t0, 0(src), .Ll_exc\@)
+ LOAD(t0, 0(src))
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
- STREST(t0, -1(t1), .Ls_exc\@)
+ STREST(t0, -1(t1))
SHIFT_DISCARD_REVERT t0, t0, bits
.set reorder
ADDC(sum, t0)
@@ -611,12 +584,12 @@ EXPORT_SYMBOL(csum_partial)
* Set match = (src and dst have same alignment)
*/
#define match rem
- LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t3, FIRST(0)(src))
ADD t2, zero, NBYTES
- LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(0)(src))
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
- STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+ STFIRST(t3, FIRST(0)(dst))
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@@ -638,26 +611,26 @@ EXPORT_SYMBOL(csum_partial)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDFIRST(t1, FIRST(1)(src))
SUB len, len, 4*NBYTES
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
- LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
- LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
- LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
- LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
- LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+ LDREST(t0, REST(0)(src))
+ LDREST(t1, REST(1)(src))
+ LDFIRST(t2, FIRST(2)(src))
+ LDFIRST(t3, FIRST(3)(src))
+ LDREST(t2, REST(2)(src))
+ LDREST(t3, REST(3)(src))
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
- STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+ STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
- STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+ STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
- STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+ STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
- STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+ STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@@ -670,11 +643,11 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
- LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
- LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDFIRST(t0, FIRST(0)(src))
+ LDREST(t0, REST(0)(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
- STORE(t0, 0(dst), .Ls_exc\@)
+ STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@@ -695,11 +668,10 @@ EXPORT_SYMBOL(csum_partial)
#endif
move t2, zero # partial word
li t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
- LOADBU(t0, N(src), .Ll_exc_copy\@); \
+ LOADBU(t0, N(src)); \
SUB len, len, 1; \
- STOREB(t0, N(dst), .Ls_exc\@); \
+ STOREB(t0, N(dst)); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
beqz len, .Lcopy_bytes_done\@; \
@@ -713,9 +685,9 @@ EXPORT_SYMBOL(csum_partial)
COPY_BYTE(4)
COPY_BYTE(5)
#endif
- LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+ LOADBU(t0, NBYTES-2(src))
SUB len, len, 1
- STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+ STOREB(t0, NBYTES-2(dst))
SLLV t0, t0, t3
or t2, t0
.Lcopy_bytes_done\@:
@@ -732,7 +704,8 @@ EXPORT_SYMBOL(csum_partial)
addu sum, v1
#endif
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON64)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_LOONGSON64)
.set push
.set arch=mips32r2
wsbh v1, sum
@@ -751,97 +724,31 @@ EXPORT_SYMBOL(csum_partial)
#endif
.set pop
.set reorder
- ADDC32(sum, psum)
jr ra
.set noreorder
+ .endm
-.Ll_exc_copy\@:
- /*
- * Copy bytes from src until faulting load address (or until a
- * lb faults)
- *
- * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
- * may be more than a byte beyond the last address.
- * Hence, the lb below may get an exception.
- *
- * Assumes src < THREAD_BUADDR($28)
- */
- LOADK t0, TI_TASK($28)
- li t2, SHIFT_START
- LOADK t0, THREAD_BUADDR(t0)
-1:
- LOADBU(t1, 0(src), .Ll_exc\@)
- ADD src, src, 1
- sb t1, 0(dst) # can't fault -- we're copy_from_user
- SLLV t1, t1, t2
- addu t2, SHIFT_INC
- ADDC(sum, t1)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 1
- bne src, t0, 1b
- .set noreorder
-.Ll_exc\@:
- LOADK t0, TI_TASK($28)
- nop
- LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
- nop
- SUB len, AT, t0 # len number of uncopied bytes
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- .set push
- .set noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
-#endif
- li v1, -EFAULT
- b .Ldone\@
- sw v1, (errptr)
-
-.Ls_exc\@:
- li v0, -1 /* invalid checksum */
- li v1, -EFAULT
+ .set noreorder
+.L_exc:
jr ra
- sw v1, (errptr)
- .set pop
- .endm
+ li v0, 0
-LEAF(__csum_partial_copy_kernel)
-EXPORT_SYMBOL(__csum_partial_copy_kernel)
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
#ifndef CONFIG_EVA
FEXPORT(__csum_partial_copy_to_user)
EXPORT_SYMBOL(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
EXPORT_SYMBOL(__csum_partial_copy_from_user)
#endif
-__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
-END(__csum_partial_copy_kernel)
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
#ifdef CONFIG_EVA
LEAF(__csum_partial_copy_to_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
END(__csum_partial_copy_from_user)
#endif
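
Net effect of the csum_partial.S conversion: the incoming partial sum (psum) and error pointer (errptr) arguments are gone, the running sum is seeded with -1 instead of 0 (li sum, -1), and every faulting access lands in the single .L_exc handler, which returns 0. Because a checksum seeded with ~0 never folds to 0 on success, callers can treat a zero return as "fault". A caller sketch under those assumptions (the wrapper shape is illustrative, not taken from this diff):

	static inline __wsum
	csum_and_copy_from_user(const void __user *src, void *dst, int len)
	{
		if (!access_ok(src, len))
			return 0;	/* same "fault" value as .L_exc */
		return __csum_partial_copy_from_user((__force void *)src,
						     dst, len);
	}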
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 68c495ed71e3..ccdb1fc1e4bf 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -16,7 +16,6 @@
#include <asm/asm.h>
#include <asm/compiler.h>
-#include <asm/war.h>
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define GCC_DADDI_IMM_ASM() "I"
@@ -24,6 +23,8 @@
#define GCC_DADDI_IMM_ASM() "r"
#endif
+#ifndef CONFIG_HAVE_PLAT_DELAY
+
void __delay(unsigned long loops)
{
__asm__ __volatile__ (
@@ -63,3 +64,5 @@ void __ndelay(unsigned long ns)
__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__ndelay);
+
+#endif
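
The new guard compiles the generic loop-calibrated implementations out when a platform provides its own (CONFIG_HAVE_PLAT_DELAY). The magic constant in __ndelay() above is a 32.32 fixed-point reciprocal: 0x00000005 is 2^32 / 10^9 rounded up, so the shift stands in for a 64-bit division. Equivalent arithmetic as a sketch (the helper name is illustrative):

	/* loops = ns * lpj * HZ / 10^9, without a 64-bit divide;
	 * lpj * HZ is the number of __delay() iterations per second. */
	static inline unsigned long ns_to_loops(unsigned long ns,
						unsigned long lpj)
	{
		return (ns * 0x00000005ull * HZ * lpj) >> 32;
	}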
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 83ed37298e66..8ce97b8741fd 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -12,7 +12,6 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
void dump_tlb_regs(void)
@@ -60,14 +59,6 @@ static inline const char *msk2str(unsigned int mask)
case PM_8M: return "8Mb";
case PM_32M: return "32Mb";
#endif
-#ifndef CONFIG_CPU_VR41XX
- case PM_1M: return "1Mb";
- case PM_4M: return "4Mb";
- case PM_16M: return "16Mb";
- case PM_64M: return "64Mb";
- case PM_256M: return "256Mb";
- case PM_1G: return "1Gb";
-#endif
}
return "";
}
@@ -80,7 +71,7 @@ static void dump_tlb(int first, int last)
unsigned int pagemask, guestctl1 = 0, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
- unsigned long uninitialized_var(s_mmid);
+ unsigned long s_mmid;
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
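
The uninitialized_var() change follows the tree-wide purge of that macro: it suppressed "may be used uninitialized" warnings via self-initialization, which also hid genuinely uninitialized uses. For reference, the removed macro was essentially this (GCC form; clang used a slight variant):

	#define uninitialized_var(x) x = x

A plain declaration lets the compiler prove for itself that s_mmid is only read on paths where it has been assigned.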
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index 210f5a95ecb1..a9cb28813f0b 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -32,7 +32,7 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev,
sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
printk(KERN_WARNING "io_map_base of root PCI bus %s unset. "
"Trying to continue but you better\nfix this issue or "
- "report it to linux-mips@linux-mips.org or your "
+ "report it to linux-mips@vger.kernel.org or your "
"vendor.\n", name);
#ifdef CONFIG_PCI_DOMAINS
panic("To avoid data corruption io_map_base MUST be set with "
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index cdd19d8561e8..a4b4e805ff13 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -32,9 +32,9 @@
#undef CONFIG_CPU_HAS_PREFETCH
#endif
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define dst a0
@@ -116,7 +116,7 @@
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous; \
/* This is assembled in EVA mode */ \
.else; \
@@ -125,7 +125,7 @@
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous; \
.else; \
/* \
@@ -301,14 +301,14 @@
and t0, src, ADDRMASK
PREFS( 0, 2*32(src) )
PREFD( 1, 2*32(dst) )
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
bnez t1, .Ldst_unaligned\@
nop
bnez t0, .Lsrc_unaligned_dst_aligned\@
-#else
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
or t0, t0, t1
bnez t0, .Lcopy_unaligned_bytes\@
-#endif
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
/*
* use delay slot for fall-through
* src and dst are aligned; need to compute rem
@@ -389,7 +389,7 @@
bne rem, len, 1b
.set noreorder
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
@@ -491,7 +491,7 @@
bne len, rem, 1b
.set noreorder
-#endif /* CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
.Lcopy_bytes_checklen\@:
beqz len, .Ldone\@
nop
@@ -520,7 +520,7 @@
jr ra
nop
-#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
.Lcopy_unaligned_bytes\@:
1:
COPY_BYTE(0)
@@ -534,7 +534,7 @@
ADD src, src, 8
b 1b
ADD dst, dst, 8
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
.if __memcpy == 1
END(memcpy)
.set __memcpy, 0
@@ -598,6 +598,7 @@ SEXC(1)
nop
.endm
+#ifndef CONFIG_HAVE_PLAT_MEMCPY
.align 5
LEAF(memmove)
EXPORT_SYMBOL(memmove)
@@ -660,11 +661,17 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
move v0, dst /* return value */
.L__memcpy:
-FEXPORT(__copy_user)
-EXPORT_SYMBOL(__copy_user)
+#ifndef CONFIG_EVA
+FEXPORT(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
+FEXPORT(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
+#endif
/* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+#endif
+
#ifdef CONFIG_EVA
/*
@@ -678,10 +685,10 @@ EXPORT_SYMBOL(__copy_user)
* __copy_from_user (EVA)
*/
-LEAF(__copy_from_user_eva)
-EXPORT_SYMBOL(__copy_from_user_eva)
+LEAF(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
-END(__copy_from_user_eva)
+END(__raw_copy_from_user)
@@ -689,18 +696,9 @@ END(__copy_from_user_eva)
* __copy_to_user (EVA)
*/
-LEAF(__copy_to_user_eva)
-EXPORT_SYMBOL(__copy_to_user_eva)
+LEAF(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
-END(__copy_to_user_eva)
-
-/*
- * __copy_in_user (EVA)
- */
-
-LEAF(__copy_in_user_eva)
-EXPORT_SYMBOL(__copy_in_user_eva)
-__BUILD_COPY_USER EVA_MODE USEROP USEROP
-END(__copy_in_user_eva)
+END(__raw_copy_to_user)
#endif
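
With the EVA entry points renamed to the __raw_copy_{from,to}_user names and __copy_in_user dropped (raw user-to-user copies no longer need a dedicated primitive), both EVA and non-EVA kernels expose the same two symbols. A sketch of the expected C-side wrapper (assumed shape, not part of this diff):

	static inline unsigned long
	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		return __raw_copy_from_user(to, (__force const void *)from, n);
	}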
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 418611ef13cf..79405c32cc85 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -8,9 +8,9 @@
* Copyright (C) 2007 by Maciej W. Rozycki
* Copyright (C) 2011, 2012 MIPS Technologies, Inc.
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#if LONGSIZE == 4
@@ -52,7 +52,7 @@
9: ___BUILD_EVA_INSN(insn, reg, addr); \
.endif; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
.macro f_fill64 dst, offset, val, fixup, mode
@@ -115,7 +115,7 @@
#endif
.set reorder
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
@@ -125,7 +125,7 @@
PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define STORE_BYTE(N) \
EX(sb, a1, N(a0), .Lbyte_fixup\@); \
.set noreorder; \
@@ -150,7 +150,7 @@
ori a0, STORMASK
xori a0, STORMASK
PTR_ADDIU a0, STORSIZE
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
andi t0, a2, 0x40-STORSIZE
@@ -185,7 +185,7 @@
.set noreorder
beqz a2, 1f
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
PTR_ADDU a0, a2 /* What's left */
.set reorder
R10KCBARRIER(0(ra))
@@ -194,7 +194,7 @@
#else
EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
#endif
-#else
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
PTR_SUBU t0, $0, a2
.set reorder
move a2, zero /* No remaining longs */
@@ -211,7 +211,7 @@
EX(sb, a1, 6(a0), .Lbyte_fixup\@)
#endif
0:
-#endif
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
1: move a2, zero
jr ra
@@ -234,7 +234,7 @@
.hidden __memset
.endif
-#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
.Lbyte_fixup\@:
/*
* unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
@@ -243,7 +243,7 @@
PTR_SUBU a2, t0
PTR_ADDIU a2, 1
jr ra
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
.Lfirst_fixup\@:
/* unset_bytes already in a2 */
@@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset)
#ifndef CONFIG_EVA
FEXPORT(__bzero)
EXPORT_SYMBOL(__bzero)
-#else
-FEXPORT(__bzero_kernel)
-EXPORT_SYMBOL(__bzero_kernel)
#endif
__BUILD_BZERO LEGACY_MODE
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index 5530070e0d05..a9b72eacfc0b 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
#include <linux/export.h>
#include <linux/stringify.h>
-#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
+#if !defined(CONFIG_CPU_HAS_DIEI)
/*
* For cli() we have to insert nops to make sure that the new value
@@ -37,7 +37,7 @@
*/
notrace void arch_local_irq_disable(void)
{
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_disable);
@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
return flags;
}
@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -106,8 +106,8 @@ notrace void arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_restore);
-#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_DIEI */
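
Two independent fixes in mips-atomic.c: the guard now keys on the CONFIG_CPU_HAS_DIEI feature symbol instead of enumerating ISA revisions, and the preempt helpers switch to their _notrace forms. The latter matters because these functions are themselves marked notrace: a traceable preempt_disable() inside them could re-enter the tracer while the IRQ state is half-updated. The resulting pattern, sketched:

	notrace unsigned long arch_local_irq_save(void)
	{
		unsigned long flags;

		preempt_disable_notrace();	/* a traced preempt_disable()
						 * here could recurse */
		/* mfc0/di/ehb sequence from the hunk above sets flags */
		preempt_enable_notrace();
		return flags;
	}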
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index b97d9c5d8323..fcf594af0002 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -12,18 +12,13 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
-extern int r3k_have_wired_reg;
-
void dump_tlb_regs(void)
{
pr_info("Index : %0x\n", read_c0_index());
pr_info("EntryHi : %0lx\n", read_c0_entryhi());
pr_info("EntryLo : %0lx\n", read_c0_entrylo0());
- if (r3k_have_wired_reg)
- pr_info("Wired : %0x\n", read_c0_wired());
}
static void dump_tlb(int first, int last)
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index acdff66bd5d2..94f4203563c1 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -7,15 +7,15 @@
* Copyright (C) 2011 MIPS Technologies, Inc.
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
/*
@@ -29,19 +29,17 @@
* it happens at most some bytes of the exceptions handlers will be copied.
*/
- .macro __BUILD_STRNCPY_ASM func
-LEAF(__strncpy_from_\func\()_asm)
- LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
- and v0, a1
- bnez v0, .Lfault\@
-
+LEAF(__strncpy_from_user_asm)
move t0, zero
move v1, a1
-.ifeqs "\func","kernel"
-1: EX(lbu, v0, (v1), .Lfault\@)
-.else
-1: EX(lbue, v0, (v1), .Lfault\@)
-.endif
+#ifdef CONFIG_EVA
+ .set push
+ .set eva
+1: EX(lbue, v0, (v1), .Lfault)
+ .set pop
+#else
+1: EX(lbu, v0, (v1), .Lfault)
+#endif
PTR_ADDIU v1, 1
R10KCBARRIER(0(ra))
sb v0, (a0)
@@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm)
bne t0, a2, 1b
2: PTR_ADDU v0, a1, t0
xor v0, a1
- bltz v0, .Lfault\@
+ bltz v0, .Lfault
move v0, t0
jr ra # return n
- END(__strncpy_from_\func\()_asm)
+ END(__strncpy_from_user_asm)
-.Lfault\@:
+.Lfault:
li v0, -EFAULT
jr ra
.section __ex_table,"a"
- PTR 1b, .Lfault\@
+ PTR_WD 1b, .Lfault
.previous
- .endm
-
-#ifndef CONFIG_EVA
- /* Set aliases */
- .global __strncpy_from_user_asm
- .set __strncpy_from_user_asm, __strncpy_from_kernel_asm
-EXPORT_SYMBOL(__strncpy_from_user_asm)
-#endif
-
-__BUILD_STRNCPY_ASM kernel
-EXPORT_SYMBOL(__strncpy_from_kernel_asm)
-
-#ifdef CONFIG_EVA
- .set push
- .set eva
-__BUILD_STRNCPY_ASM user
- .set pop
-EXPORT_SYMBOL(__strncpy_from_user_asm)
-#endif
+ EXPORT_SYMBOL(__strncpy_from_user_asm)
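
The same consolidation is applied here and in strnlen_user.S below: with the kernel-address variants gone (no more TI_ADDR_LIMIT check in the asm), the \func macro parameterization collapses into a single user entry whose EVA/non-EVA load instruction is selected at build time. The asm no longer validates the pointer itself, so the C caller is expected to; a sketch of that wrapper (assumed shape, not part of this diff):

	static inline long
	strncpy_from_user(char *dst, const char __user *src, long n)
	{
		if (!access_ok(src, n))
			return -EFAULT;
		might_fault();
		return __strncpy_from_user_asm(dst, src, n);
	}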
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index e1bacf5a3abe..c192a6f6cd84 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -6,15 +6,15 @@
* Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
* Copyright (c) 1999 Silicon Graphics, Inc.
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
/*
@@ -26,12 +26,7 @@
* bytes. There's nothing secret there. On 64-bit accessing beyond
* the maximum is a tad hairier ...
*/
- .macro __BUILD_STRNLEN_ASM func
-LEAF(__strnlen_\func\()_asm)
- LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
- and v0, a0
- bnez v0, .Lfault\@
-
+LEAF(__strnlen_user_asm)
move v0, a0
PTR_ADDU a1, a0 # stop pointer
1:
@@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm)
li AT, 1
#endif
beq v0, a1, 1f # limit reached?
-.ifeqs "\func", "kernel"
- EX(lb, t0, (v0), .Lfault\@)
-.else
- EX(lbe, t0, (v0), .Lfault\@)
-.endif
+#ifdef CONFIG_EVA
+ .set push
+ .set eva
+ EX(lbe, t0, (v0), .Lfault)
+ .set pop
+#else
+ EX(lb, t0, (v0), .Lfault)
+#endif
.set noreorder
bnez t0, 1b
1:
@@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm)
.set reorder
PTR_SUBU v0, a0
jr ra
- END(__strnlen_\func\()_asm)
+ END(__strnlen_user_asm)
-.Lfault\@:
+.Lfault:
move v0, zero
jr ra
- .endm
-
-#ifndef CONFIG_EVA
- /* Set aliases */
- .global __strnlen_user_asm
- .set __strnlen_user_asm, __strnlen_kernel_asm
-EXPORT_SYMBOL(__strnlen_user_asm)
-#endif
-
-__BUILD_STRNLEN_ASM kernel
-EXPORT_SYMBOL(__strnlen_kernel_asm)
-
-#ifdef CONFIG_EVA
- .set push
- .set eva
-__BUILD_STRNLEN_ASM user
- .set pop
-EXPORT_SYMBOL(__strnlen_user_asm)
-#endif
+ EXPORT_SYMBOL(__strnlen_user_asm)
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 09d5deea747f..f80a67c092b6 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -37,10 +37,12 @@
*/
unsigned long run_uncached(void *func)
{
- register long sp __asm__("$sp");
register long ret __asm__("$2");
long lfunc = (long)func, ufunc;
long usp;
+ long sp;
+
+ __asm__("move %0, $sp" : "=r" (sp));
if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
usp = CKSEG1ADDR(sp);