Diffstat (limited to 'arch/mips/lib')
-rw-r--r--  arch/mips/lib/.gitignore      |   4
-rw-r--r--  arch/mips/lib/Makefile        |   9
-rw-r--r--  arch/mips/lib/ashldi3.c       |  29
-rw-r--r--  arch/mips/lib/ashrdi3.c       |  31
-rw-r--r--  arch/mips/lib/bitops.c        |  71
-rw-r--r--  arch/mips/lib/bswapdi.c       |  13
-rw-r--r--  arch/mips/lib/bswapsi.c       |  13
-rw-r--r--  arch/mips/lib/cmpdi2.c        |  27
-rw-r--r--  arch/mips/lib/csum_partial.S  | 400
-rw-r--r--  arch/mips/lib/delay.c         |  26
-rw-r--r--  arch/mips/lib/dump_tlb.c      | 184
-rw-r--r--  arch/mips/lib/iomap-pci.c     |  18
-rw-r--r--  arch/mips/lib/iomap.c         | 226
-rw-r--r--  arch/mips/lib/iomap_copy.c    |  29
-rw-r--r--  arch/mips/lib/libgcc.h        |  18
-rw-r--r--  arch/mips/lib/lshrdi3.c       |  29
-rw-r--r--  arch/mips/lib/memcpy.S        | 459
-rw-r--r--  arch/mips/lib/memset.S        | 245
-rw-r--r--  arch/mips/lib/mips-atomic.c   |  98
-rw-r--r--  arch/mips/lib/multi3.c        |  54
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c  |  38
-rw-r--r--  arch/mips/lib/strlen_user.S   |  40
-rw-r--r--  arch/mips/lib/strncpy_user.S  |  32
-rw-r--r--  arch/mips/lib/strnlen_user.S  |  36
-rw-r--r--  arch/mips/lib/ucmpdi2.c       |  21
-rw-r--r--  arch/mips/lib/uncached.c      |   7
26 files changed, 1077 insertions(+), 1080 deletions(-)
diff --git a/arch/mips/lib/.gitignore b/arch/mips/lib/.gitignore
new file mode 100644
index 000000000000..647d7a922e68
--- /dev/null
+++ b/arch/mips/lib/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# This now-removed directory used to contain generated files.
+/crypto/
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index eeddc58802e1..5d5b993cbc2b 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -1,17 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for MIPS-specific library files..
 #
 
 lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
-	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   mips-atomic.o strncpy_user.o \
 	   strnlen_user.o uncached.o
 
-obj-y	+= iomap.o
+obj-y	+= iomap_copy.o
 obj-$(CONFIG_PCI) += iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
 
 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
 obj-$(CONFIG_CPU_R3000)		+= r3k_dump_tlb.o
-obj-$(CONFIG_CPU_TX39XX)	+= r3k_dump_tlb.o
 
 # libgcc-style stuff needed in the kernel
-obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o
+obj-y += bswapsi.o bswapdi.o multi3.o
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
deleted file mode 100644
index beb80f316095..000000000000
--- a/arch/mips/lib/ashldi3.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/module.h>
-
-#include "libgcc.h"
-
-long long __ashldi3(long long u, word_type b)
-{
-	DWunion uu, w;
-	word_type bm;
-
-	if (b == 0)
-		return u;
-
-	uu.ll = u;
-	bm = 32 - b;
-
-	if (bm <= 0) {
-		w.s.low = 0;
-		w.s.high = (unsigned int) uu.s.low << -bm;
-	} else {
-		const unsigned int carries = (unsigned int) uu.s.low >> bm;
-
-		w.s.low = (unsigned int) uu.s.low << b;
-		w.s.high = ((unsigned int) uu.s.high << b) | carries;
-	}
-
-	return w.ll;
-}
-
-EXPORT_SYMBOL(__ashldi3);
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
deleted file mode 100644
index c884a912b660..000000000000
--- a/arch/mips/lib/ashrdi3.c
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <linux/module.h>
-
-#include "libgcc.h"
-
-long long __ashrdi3(long long u, word_type b)
-{
-	DWunion uu, w;
-	word_type bm;
-
-	if (b == 0)
-		return u;
-
-	uu.ll = u;
-	bm = 32 - b;
-
-	if (bm <= 0) {
-		/* w.s.high = 1..1 or 0..0 */
-		w.s.high =
-		    uu.s.high >> 31;
-		w.s.low = uu.s.high >> -bm;
-	} else {
-		const unsigned int carries = (unsigned int) uu.s.high << bm;
-
-		w.s.high = uu.s.high >> b;
-		w.s.low = ((unsigned int) uu.s.low >> b) | carries;
-	}
-
-	return w.ll;
-}
-
-EXPORT_SYMBOL(__ashrdi3);
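The two deletions above (and the cmpdi2.c/lshrdi3.c removals further down) drop this directory's private copies of libgcc's double-word helpers; the Makefile hunk now builds bswapsi.o, bswapdi.o and multi3.o instead. For readers unfamiliar with the pattern, here is a minimal user-space sketch of the technique the deleted __ashrdi3() used — viewing the 64-bit value as two 32-bit halves through a DWunion-style layout. It assumes a little-endian host (so low comes first) and the function and test values are illustrative only:

    #include <assert.h>
    #include <stdio.h>

    /* User-space mirror of the kernel's DWunion, little-endian layout. */
    typedef union {
            struct { int low, high; } s;
            long long ll;
    } dwunion;

    /* Arithmetic right shift built from 32-bit operations, following the
     * same two cases as the deleted __ashrdi3(): either the shift crosses
     * the word boundary (b >= 32) or high-word bits carry into the low word. */
    static long long ashrdi3_sketch(long long u, int b)
    {
            dwunion uu = { .ll = u }, w;
            int bm = 32 - b;

            if (b == 0)
                    return u;
            if (bm <= 0) {          /* result is the sign-extended high word */
                    w.s.high = uu.s.high >> 31;
                    w.s.low = uu.s.high >> -bm;
            } else {                /* carry high-word bits into the low word */
                    const unsigned int carries = (unsigned int)uu.s.high << bm;

                    w.s.high = uu.s.high >> b;
                    w.s.low = ((unsigned int)uu.s.low >> b) | carries;
            }
            return w.ll;
    }

    int main(void)
    {
            long long x = -0x123456789abLL;

            for (int b = 1; b < 64; b++)
                    assert(ashrdi3_sketch(x, b) == (x >> b));
            puts("ok");
            return 0;
    }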
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 3b2a1e78a543..00aee98e9d54 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -7,6 +7,7 @@
  * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  */
 #include <linux/bitops.h>
+#include <linux/bits.h>
 #include <linux/irqflags.h>
 #include <linux/export.h>
 
@@ -19,12 +20,11 @@
  */
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
+	volatile unsigned long *a = &addr[BIT_WORD(nr)];
+	unsigned int bit = nr % BITS_PER_LONG;
 	unsigned long mask;
 	unsigned long flags;
 
-	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
 	raw_local_irq_save(flags);
 	*a |= mask;
@@ -41,12 +41,11 @@ EXPORT_SYMBOL(__mips_set_bit);
  */
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
+	volatile unsigned long *a = &addr[BIT_WORD(nr)];
+	unsigned int bit = nr % BITS_PER_LONG;
 	unsigned long mask;
 	unsigned long flags;
 
-	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
 	raw_local_irq_save(flags);
 	*a &= ~mask;
@@ -63,12 +62,11 @@ EXPORT_SYMBOL(__mips_clear_bit);
  */
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
+	volatile unsigned long *a = &addr[BIT_WORD(nr)];
+	unsigned int bit = nr % BITS_PER_LONG;
 	unsigned long mask;
 	unsigned long flags;
 
-	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
 	raw_local_irq_save(flags);
 	*a ^= mask;
@@ -78,32 +76,6 @@ EXPORT_SYMBOL(__mips_change_bit);
 
 /**
- * __mips_test_and_set_bit - Set a bit and return its old value.  This is
- * called by test_and_set_bit() if it cannot find a faster solution.
- * @nr: Bit to set
- * @addr: Address to count from
- */
-int __mips_test_and_set_bit(unsigned long nr,
-	volatile unsigned long *addr)
-{
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
-	unsigned long mask;
-	unsigned long flags;
-	int res;
-
-	a += nr >> SZLONG_LOG;
-	mask = 1UL << bit;
-	raw_local_irq_save(flags);
-	res = (mask & *a) != 0;
-	*a |= mask;
-	raw_local_irq_restore(flags);
-	return res;
-}
-EXPORT_SYMBOL(__mips_test_and_set_bit);
-
-
-/**
  * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
  * called by test_and_set_bit_lock() if it cannot find a faster solution.
 * @nr: Bit to set
@@ -112,13 +84,12 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
 int __mips_test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
+	volatile unsigned long *a = &addr[BIT_WORD(nr)];
+	unsigned int bit = nr % BITS_PER_LONG;
 	unsigned long mask;
 	unsigned long flags;
 	int res;
 
-	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
 	raw_local_irq_save(flags);
 	res = (mask & *a) != 0;
@@ -137,13 +108,12 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
  */
 int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
+	volatile unsigned long *a = &addr[BIT_WORD(nr)];
+	unsigned int bit = nr % BITS_PER_LONG;
 	unsigned long mask;
 	unsigned long flags;
 	int res;
 
-	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
 	raw_local_irq_save(flags);
 	res = (mask & *a) != 0;
@@ -162,13 +132,12 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
  */
 int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
+	volatile unsigned long *a = &addr[BIT_WORD(nr)];
+	unsigned int bit = nr % BITS_PER_LONG;
 	unsigned long mask;
 	unsigned long flags;
 	int res;
 
-	a += nr >> SZLONG_LOG;
 	mask = 1UL << bit;
 	raw_local_irq_save(flags);
 	res = (mask & *a) != 0;
@@ -177,3 +146,17 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 	return res;
 }
 EXPORT_SYMBOL(__mips_test_and_change_bit);
+
+bool __mips_xor_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr)
+{
+	unsigned long flags;
+	unsigned long data;
+
+	raw_local_irq_save(flags);
+	data = *addr;
+	*addr = data ^ mask;
+	raw_local_irq_restore(flags);
+
+	return (data & BIT(7)) != 0;
+}
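The rewritten helpers above replace the hand-rolled SZLONG_LOG/SZLONG_MASK arithmetic with the generic BIT_WORD() word index plus an nr % BITS_PER_LONG bit offset, keeping the pointer volatile throughout. A minimal user-space sketch of the same decomposition — the raw_local_irq_save()/restore() pair, which is what makes the kernel version atomic on CPUs without LL/SC, has no direct analogue here:

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

    /* Same word/bit split as the patched __mips_set_bit(), minus irq masking. */
    static void set_bit_sketch(unsigned long nr, volatile unsigned long *addr)
    {
            volatile unsigned long *a = &addr[BIT_WORD(nr)];
            unsigned int bit = nr % BITS_PER_LONG;

            *a |= 1UL << bit;
    }

    int main(void)
    {
            unsigned long map[4] = { 0 };

            set_bit_sketch(3, map);
            set_bit_sketch(BITS_PER_LONG + 1, map);
            assert(map[0] == (1UL << 3) && map[1] == 2UL);
            return 0;
    }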
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
new file mode 100644
index 000000000000..88242dc7de17
--- /dev/null
+++ b/arch/mips/lib/bswapdi.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned long long __bswapdi2(unsigned long long u);
+
+unsigned long long notrace __bswapdi2(unsigned long long u)
+{
+	return ___constant_swab64(u);
+}
+EXPORT_SYMBOL(__bswapdi2);
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
new file mode 100644
index 000000000000..2ed655497de5
--- /dev/null
+++ b/arch/mips/lib/bswapsi.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+/* To silence -Wmissing-prototypes. */
+unsigned int __bswapsi2(unsigned int u);
+
+unsigned int notrace __bswapsi2(unsigned int u)
+{
+	return ___constant_swab32(u);
+}
+EXPORT_SYMBOL(__bswapsi2);
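Both new files simply wrap the constant-folding swab helpers from <uapi/linux/swab.h> in out-of-line symbols that the compiler can call. For reference, a sketch of the mask-and-shift form that ___constant_swab32() boils down to (the function name here is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Mask-and-shift byte swap: move each byte to its mirrored position. */
    static uint32_t bswapsi_sketch(uint32_t u)
    {
            return ((u & 0x000000ffu) << 24) |
                   ((u & 0x0000ff00u) <<  8) |
                   ((u & 0x00ff0000u) >>  8) |
                   ((u & 0xff000000u) >> 24);
    }

    int main(void)
    {
            assert(bswapsi_sketch(0x12345678u) == 0x78563412u);
            return 0;
    }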
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
deleted file mode 100644
index 8c1306437ed1..000000000000
--- a/arch/mips/lib/cmpdi2.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include <linux/module.h>
-
-#include "libgcc.h"
-
-word_type __cmpdi2(long long a, long long b)
-{
-	const DWunion au = {
-		.ll = a
-	};
-	const DWunion bu = {
-		.ll = b
-	};
-
-	if (au.s.high < bu.s.high)
-		return 0;
-	else if (au.s.high > bu.s.high)
-		return 2;
-
-	if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
-		return 0;
-	else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
-		return 2;
-
-	return 1;
-}
-
-EXPORT_SYMBOL(__cmpdi2);
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index a6adffbb4e5f..3d2ff4118d79 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -8,8 +8,10 @@
  * Copyright (C) 1998, 1999 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
+#include <linux/export.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/regdef.h>
@@ -55,24 +57,30 @@
 #define UNIT(unit)	((unit)*NBYTES)
 
 #define ADDC(sum,reg)						\
+	.set	push;						\
+	.set	noat;						\
 	ADD	sum, reg;					\
 	sltu	v1, sum, reg;					\
 	ADD	sum, v1;					\
+	.set	pop
 
 #define ADDC32(sum,reg)						\
+	.set	push;						\
+	.set	noat;						\
 	addu	sum, reg;					\
 	sltu	v1, sum, reg;					\
 	addu	sum, v1;					\
+	.set	pop
 
 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
 	LOAD	_t0, (offset + UNIT(0))(src);			\
 	LOAD	_t1, (offset + UNIT(1))(src);			\
 	LOAD	_t2, (offset + UNIT(2))(src);			\
 	LOAD	_t3, (offset + UNIT(3))(src);			\
+	ADDC(_t0, _t1);						\
+	ADDC(_t2, _t3);						\
 	ADDC(sum, _t0);						\
-	ADDC(sum, _t1);						\
-	ADDC(sum, _t2);						\
-	ADDC(sum, _t3)
+	ADDC(sum, _t2)
 
 #ifdef USE_DOUBLE
 #define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
@@ -96,6 +104,7 @@
 	.set	noreorder
 	.align	5
 LEAF(csum_partial)
+EXPORT_SYMBOL(csum_partial)
 	move	sum, zero
 	move	t7, zero
 
@@ -270,9 +279,13 @@ LEAF(csum_partial)
 #endif
 
 	/* odd buffer alignment? */
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+    defined(CONFIG_CPU_LOONGSON64)
+	.set	push
+	.set	arch=mips32r2
 	wsbh	v1, sum
 	movn	sum, v1, t7
+	.set	pop
 #else
 	beqz	t7, 1f			/* odd buffer alignment? */
 	 lui	v1, 0x00ff
@@ -295,8 +308,8 @@ LEAF(csum_partial)
 /*
  * checksum and copy routines based on memcpy.S
  *
- *	csum_partial_copy_nocheck(src, dst, len, sum)
- *	__csum_partial_copy_user(src, dst, len, sum, errp)
+ *	csum_partial_copy_nocheck(src, dst, len)
+ *	__csum_partial_copy_kernel(src, dst, len)
  *
  * See "Spec" in memcpy.S for details.	Unlike __copy_user, all
  * function in this file use the standard calling convention.
 */
@@ -305,42 +318,65 @@
 #define src a0
 #define dst a1
 #define len a2
-#define psum a3
 #define sum v0
 #define odd t8
-#define errptr t9
 
 /*
- * The exception handler for loads requires that:
- *  1- AT contain the address of the byte just past the end of the source
- *     of the copy,
- *  2- src_entry <= src < AT, and
- *  3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- *	not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
  */
 
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+#define EXC(insn, type, reg, addr)			\
+	.if \mode == LEGACY_MODE;			\
+9:		insn reg, addr;				\
+		.section __ex_table,"a";		\
+		PTR_WD	9b, .L_exc;			\
+		.previous;				\
+	/* This is enabled in EVA mode */		\
+	.else;						\
+		/* If loading from user or storing to user */	\
+		.if ((\from == USEROP) && (type == LD_INSN)) || \
+		    ((\to == USEROP) && (type == ST_INSN));	\
+9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
+			.section __ex_table,"a";	\
+			PTR_WD	9b, .L_exc;		\
+			.previous;			\
+		.else;					\
+			/* EVA without exception */	\
+			insn reg, addr;			\
+		.endif;					\
+	.endif
+
+#undef LOAD
+
 #ifdef USE_DOUBLE
 
-#define LOAD	ld
-#define LOADL	ldl
-#define LOADR	ldr
-#define STOREL	sdl
-#define STORER	sdr
-#define STORE	sd
+#define LOADK	ld /* No exception */
+#define LOAD(reg, addr)		EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr)	EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr)	EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr)	EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr)	EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr)	EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr)	EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr)	EXC(sd, ST_INSN, reg, addr)
 #define ADD	daddu
 #define SUB	dsubu
 #define SRL	dsrl
@@ -352,12 +388,15 @@
 
 #else
 
-#define LOAD	lw
-#define LOADL	lwl
-#define LOADR	lwr
-#define STOREL	swl
-#define STORER	swr
-#define STORE	sw
+#define LOADK	lw /* No exception */
+#define LOAD(reg, addr)		EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr)	EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr)	EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr)	EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr)	EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr)	EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr)	EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr)	EXC(sw, ST_INSN, reg, addr)
 #define ADD	addu
 #define SUB	subu
 #define SRL	srl
@@ -396,15 +435,9 @@
 	.set	at=v1
 #endif
 
-LEAF(__csum_partial_copy_user)
-	PTR_ADDU	AT, src, len	/* See (1) above. */
-#ifdef CONFIG_64BIT
-	move	errptr, a4
-#else
-	lw	errptr, 16(sp)
-#endif
-FEXPORT(csum_partial_copy_nocheck)
-	move	sum, zero
+	.macro	__BUILD_CSUM_PARTIAL_COPY_USER	mode, from, to
+
+	li	sum, -1
 	move	odd, zero
 	/*
 	 * Note: dst & src may be unaligned, len may be 0
@@ -419,49 +452,49 @@ FEXPORT(csum_partial_copy_nocheck)
 	 */
 	sltu	t2, len, NBYTES
 	and	t1, dst, ADDRMASK
-	bnez	t2, .Lcopy_bytes_checklen
+	bnez	t2, .Lcopy_bytes_checklen\@
 	 and	t0, src, ADDRMASK
 	andi	odd, dst, 0x1			/* odd buffer? */
-	bnez	t1, .Ldst_unaligned
+	bnez	t1, .Ldst_unaligned\@
 	 nop
-	bnez	t0, .Lsrc_unaligned_dst_aligned
+	bnez	t0, .Lsrc_unaligned_dst_aligned\@
 	/*
 	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
-.Lboth_aligned:
+.Lboth_aligned\@:
 	 SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
-	beqz	t0, .Lcleanup_both_aligned	# len < 8*NBYTES
+	beqz	t0, .Lcleanup_both_aligned\@	# len < 8*NBYTES
	 nop
	SUB	len, 8*NBYTES			# subtract here for bgez loop
	.align	4
1:
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
-EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
-EXC(	LOAD	t5, UNIT(5)(src),	.Ll_exc_copy)
-EXC(	LOAD	t6, UNIT(6)(src),	.Ll_exc_copy)
-EXC(	LOAD	t7, UNIT(7)(src),	.Ll_exc_copy)
+	LOAD(t0, UNIT(0)(src))
+	LOAD(t1, UNIT(1)(src))
+	LOAD(t2, UNIT(2)(src))
+	LOAD(t3, UNIT(3)(src))
+	LOAD(t4, UNIT(4)(src))
+	LOAD(t5, UNIT(5)(src))
+	LOAD(t6, UNIT(6)(src))
+	LOAD(t7, UNIT(7)(src))
 	SUB	len, len, 8*NBYTES
 	ADD	src, src, 8*NBYTES
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
+	STORE(t0, UNIT(0)(dst))
+	ADDC(t0, t1)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
-	ADDC(sum, t1)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
+	STORE(t2, UNIT(2)(dst))
+	ADDC(t2, t3)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
-	ADDC(sum, t3)
-EXC(	STORE	t4, UNIT(4)(dst),	.Ls_exc)
+	STORE(t4, UNIT(4)(dst))
+	ADDC(t4, t5)
+	STORE(t5, UNIT(5)(dst))
 	ADDC(sum, t4)
-EXC(	STORE	t5, UNIT(5)(dst),	.Ls_exc)
-	ADDC(sum, t5)
-EXC(	STORE	t6, UNIT(6)(dst),	.Ls_exc)
+	STORE(t6, UNIT(6)(dst))
+	ADDC(t6, t7)
+	STORE(t7, UNIT(7)(dst))
 	ADDC(sum, t6)
-EXC(	STORE	t7, UNIT(7)(dst),	.Ls_exc)
-	ADDC(sum, t7)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 8*NBYTES
 	bgez	len, 1b
@@ -471,44 +504,44 @@ EXC(	STORE	t7, UNIT(7)(dst),	.Ls_exc)
 	/*
	 * len == the number of bytes left to copy < 8*NBYTES
	 */
-.Lcleanup_both_aligned:
+.Lcleanup_both_aligned\@:
 #define rem t7
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	 sltu	t0, len, 4*NBYTES
-	bnez	t0, .Lless_than_4units
+	bnez	t0, .Lless_than_4units\@
 	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
 	/*
	 * len >= 4*NBYTES
	 */
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
+	LOAD(t0, UNIT(0)(src))
+	LOAD(t1, UNIT(1)(src))
+	LOAD(t2, UNIT(2)(src))
+	LOAD(t3, UNIT(3)(src))
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
+	STORE(t0, UNIT(0)(dst))
+	ADDC(t0, t1)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
-	ADDC(sum, t1)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
+	STORE(t2, UNIT(2)(dst))
+	ADDC(t2, t3)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
-	ADDC(sum, t3)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	.set	noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
 	/*
	 * rem = len % NBYTES
	 */
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
1:
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src))
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc)
+	STORE(t0, 0(dst))
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
@@ -527,20 +560,20 @@ EXC(	STORE	t0, 0(dst),		.Ls_exc)
	 * more instruction-level parallelism.
	 */
 #define bits t2
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	 ADD	t1, dst, len	# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3	# rem = number of bits to keep
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src))
 	SUB	bits, bits, rem	# bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
-EXC(	STREST	t0, -1(t1),		.Ls_exc)
+	STREST(t0, -1(t1))
 	SHIFT_DISCARD_REVERT t0, t0, bits
 	.set	reorder
 	ADDC(sum, t0)
-	b	.Ldone
+	b	.Ldone\@
 	.set	noreorder
-.Ldst_unaligned:
+.Ldst_unaligned\@:
 	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK
@@ -551,25 +584,25 @@ EXC(	STREST	t0, -1(t1),		.Ls_exc)
	 * Set match = (src and dst have same alignment)
	 */
 #define match rem
-EXC(	LDFIRST t3, FIRST(0)(src),	.Ll_exc)
+	LDFIRST(t3, FIRST(0)(src))
 	ADD	t2, zero, NBYTES
-EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
+	LDREST(t3, REST(0)(src))
 	SUB	t2, t2, t1	# t2 = number of bytes copied
 	xor	match, t0, t1
-EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
+	STFIRST(t3, FIRST(0)(dst))
 	SLL	t4, t1, 3		# t4 = number of bits to discard
 	SHIFT_DISCARD t3, t3, t4
 	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
 	ADDC(sum, t3)
-	beq	len, t2, .Ldone
+	beq	len, t2, .Ldone\@
 	 SUB	len, len, t2
 	ADD	dst, dst, t2
-	beqz	match, .Lboth_aligned
+	beqz	match, .Lboth_aligned\@
 	 ADD	src, src, t2
 
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
 	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
-	beqz	t0, .Lcleanup_src_unaligned
+	beqz	t0, .Lcleanup_src_unaligned\@
 	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
1:
	/*
@@ -578,53 +611,53 @@ EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
	 * are to the same unit (unless src is aligned, but it's not).
	 */
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDFIRST t1, FIRST(1)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src))
+	LDFIRST(t1, FIRST(1)(src))
 	 SUB	len, len, 4*NBYTES
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
-EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t2, FIRST(2)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t3, FIRST(3)(src),	.Ll_exc_copy)
-EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
-EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
+	LDREST(t0, REST(0)(src))
+	LDREST(t1, REST(1)(src))
+	LDFIRST(t2, FIRST(2)(src))
+	LDFIRST(t3, FIRST(3)(src))
+	LDREST(t2, REST(2)(src))
+	LDREST(t3, REST(3)(src))
 	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	 nop				# improves slotting
#endif
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
+	STORE(t0, UNIT(0)(dst))
+	ADDC(t0, t1)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
-	ADDC(sum, t1)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
+	STORE(t2, UNIT(2)(dst))
+	ADDC(t2, t3)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
-	ADDC(sum, t3)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
 
-.Lcleanup_src_unaligned:
-	beqz	len, .Ldone
+.Lcleanup_src_unaligned\@:
+	beqz	len, .Ldone\@
 	 and	rem, len, NBYTES-1	# rem = len % NBYTES
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
1:
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src))
+	LDREST(t0, REST(0)(src))
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc)
+	STORE(t0, 0(dst))
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
 
-.Lcopy_bytes_checklen:
-	beqz	len, .Ldone
+.Lcopy_bytes_checklen\@:
+	beqz	len, .Ldone\@
 	 nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
 	/* 0 < len < NBYTES  */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
@@ -635,14 +668,13 @@ EXC(	STORE	t0, 0(dst),		.Ls_exc)
#endif
 	move	t2, zero	# partial word
 	li	t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)			\
-EXC(	lbu	t0, N(src), .Ll_exc_copy);	\
+	LOADBU(t0, N(src));		\
 	SUB	len, len, 1;		\
-EXC(	sb	t0, N(dst), .Ls_exc);	\
+	STOREB(t0, N(dst));		\
 	SLLV	t0, t0, t3;		\
 	addu	t3, SHIFT_INC;		\
-	beqz	len, .Lcopy_bytes_done; \
+	beqz	len, .Lcopy_bytes_done\@; \
 	 or	t2, t0
 
 	COPY_BYTE(0)
@@ -653,15 +685,17 @@ EXC(	sb	t0, N(dst), .Ls_exc);	\
 	COPY_BYTE(4)
 	COPY_BYTE(5)
#endif
-EXC(	lbu	t0, NBYTES-2(src), .Ll_exc_copy)
+	LOADBU(t0, NBYTES-2(src))
 	SUB	len, len, 1
-EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
+	STOREB(t0, NBYTES-2(dst))
 	SLLV	t0, t0, t3
 	or	t2, t0
-.Lcopy_bytes_done:
+.Lcopy_bytes_done\@:
 	ADDC(sum, t2)
-.Ldone:
+.Ldone\@:
 	/* fold checksum */
+	.set	push
+	.set	noat
#ifdef USE_DOUBLE
 	dsll32	v1, sum, 0
 	daddu	sum, v1
@@ -670,9 +704,13 @@ EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
 	addu	sum, v1
#endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+    defined(CONFIG_CPU_LOONGSON64)
+	.set	push
+	.set	arch=mips32r2
 	wsbh	v1, sum
 	movn	sum, v1, odd
+	.set	pop
#else
 	beqz	odd, 1f			/* odd buffer alignment? */
 	 lui	v1, 0x00ff
@@ -684,77 +722,33 @@ EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
 	or	sum, sum, t0
1:
#endif
+	.set	pop
 	.set	reorder
-	ADDC32(sum, psum)
 	jr	ra
 	.set	noreorder
+	.endm
 
-.Ll_exc_copy:
-	/*
-	 * Copy bytes from src until faulting load address (or until a
-	 * lb faults)
-	 *
-	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
-	 * may be more than a byte beyond the last address.
-	 * Hence, the lb below may get an exception.
-	 *
-	 * Assumes src < THREAD_BUADDR($28)
-	 */
-	LOAD	t0, TI_TASK($28)
-	 li	t2, SHIFT_START
-	LOAD	t0, THREAD_BUADDR(t0)
-1:
-EXC(	lbu	t1, 0(src),	.Ll_exc)
-	ADD	src, src, 1
-	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
-	SLLV	t1, t1, t2
-	addu	t2, SHIFT_INC
-	ADDC(sum, t1)
-	.set	reorder				/* DADDI_WAR */
-	ADD	dst, dst, 1
-	bne	src, t0, 1b
-	.set	noreorder
-.Ll_exc:
-	LOAD	t0, TI_TASK($28)
-	 nop
-	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
-	 nop
-	SUB	len, AT, t0		# len number of uncopied bytes
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *  See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	.set	reorder				/* DADDI_WAR */
-	SUB	src, len, 1
-	beqz	len, .Ldone
-	.set	noreorder
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-	.set	push
-	.set	noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-	bnez	src, 1b
-	 SUB	src, src, 1
-#else
-	li	v1, 1
-	bnez	src, 1b
-	 SUB	src, src, v1
+	.set	noreorder
+.L_exc:
+	jr	ra
+	 li	v0, 0
+
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
+#ifndef CONFIG_EVA
+FEXPORT(__csum_partial_copy_to_user)
+EXPORT_SYMBOL(__csum_partial_copy_to_user)
+FEXPORT(__csum_partial_copy_from_user)
+EXPORT_SYMBOL(__csum_partial_copy_from_user)
#endif
-	li	v1, -EFAULT
-	b	.Ldone
-	 sw	v1, (errptr)
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
 
-.Ls_exc:
-	li	v0, -1 /* invalid checksum */
-	li	v1, -EFAULT
-	jr	ra
-	 sw	v1, (errptr)
-	.set	pop
-	END(__csum_partial_copy_user)
+#ifdef CONFIG_EVA
+LEAF(__csum_partial_copy_to_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
+END(__csum_partial_copy_to_user)
+
+LEAF(__csum_partial_copy_from_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
+END(__csum_partial_copy_from_user)
+#endif
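The ADDC() macro above is an end-around-carry add: ADD, then an sltu to detect wrap-around, then add the carry back in — which is what lets the reworked CSUM_BIGCHUNK1 pair terms up before folding them into sum. A hedged C model of that primitive, plus one common way of folding the 32-bit accumulator down to 16 bits (the kernel's own tail uses the shift/wsbh sequence shown above; names here are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* End-around-carry add, the C shape of ADDC(sum, reg): the comparison
     * (sum < v) after the add plays the role of the sltu instruction. */
    static uint32_t addc(uint32_t sum, uint32_t v)
    {
            sum += v;
            return sum + (sum < v);
    }

    /* Fold a 32-bit one's-complement accumulator down to 16 bits. */
    static uint16_t csum_fold_sketch(uint32_t sum)
    {
            sum = (sum >> 16) + (sum & 0xffff);
            sum += sum >> 16;
            return (uint16_t)sum;
    }

    int main(void)
    {
            /* 0xffffffff + 1 wraps to 0; the carry is added back in. */
            assert(addc(0xffffffffu, 1) == 1);
            assert(csum_fold_sketch(0x1fffeu) == 0xffffu);
            return 0;
    }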
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 44713af15a62..ccdb1fc1e4bf 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -6,14 +6,24 @@
  * Copyright (C) 1994 by Waldorf Electronics
  * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007, 2014 Maciej W. Rozycki
  */
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/export.h>
 #include <linux/param.h>
 #include <linux/smp.h>
+#include <linux/stringify.h>
 
+#include <asm/asm.h>
 #include <asm/compiler.h>
-#include <asm/war.h>
+
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+#define GCC_DADDI_IMM_ASM() "I"
+#else
+#define GCC_DADDI_IMM_ASM() "r"
+#endif
+
+#ifndef CONFIG_HAVE_PLAT_DELAY
 
 void __delay(unsigned long loops)
 {
@@ -21,14 +31,10 @@ void __delay(unsigned long loops)
 	"	.set	noreorder				\n"
 	"	.align	3					\n"
 	"1:	bnez	%0, 1b					\n"
-#if BITS_PER_LONG == 32
-	"	subu	%0, 1					\n"
-#else
-	"	dsubu	%0, 1					\n"
-#endif
+	"	" __stringify(LONG_SUBU) "	%0, %1		\n"
 	"	.set	reorder					\n"
 	: "=r" (loops)
-	: "0" (loops));
+	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
 }
 EXPORT_SYMBOL(__delay);
 
@@ -58,3 +64,5 @@ void __ndelay(unsigned long ns)
 	__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
 }
 EXPORT_SYMBOL(__ndelay);
+
+#endif
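__ndelay() above avoids a 64-bit division: 0x00000005 is 2^32/10^9 rounded up, so the multiply-and-shift computes roughly ns/1e9 * HZ * lpj loop iterations while always erring on the long side. A sketch of the same fixed-point scaling with assumed (illustrative, not measured) HZ and lpj values:

    #include <stdint.h>
    #include <stdio.h>

    /* Multiply-and-shift in place of dividing by 10^9: 0x5 ~= 2^32 / 10^9,
     * rounded up so the computed delay is never too short. The hz and lpj
     * values below are stand-ins for a real machine's calibration. */
    int main(void)
    {
            const uint64_t hz = 100, lpj = 4000000;     /* assumed values */
            uint64_t ns = 3000;
            uint64_t loops = (ns * 0x00000005ull * hz * lpj) >> 32;

            printf("%llu ns -> %llu delay loops\n",
                   (unsigned long long)ns, (unsigned long long)loops);
            return 0;
    }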
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 32b9f21bfd85..8ce97b8741fd 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Dump R4x00 TLB for debugging purposes.
  *
@@ -7,11 +8,41 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
+#include <asm/hazards.h>
 #include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
 
+void dump_tlb_regs(void)
+{
+	const int field = 2 * sizeof(unsigned long);
+
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("PageMask : %0x\n", read_c0_pagemask());
+	if (cpu_has_guestid)
+		pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
+	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
+	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
+	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
+	pr_info("Wired    : %0x\n", read_c0_wired());
+	switch (current_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_R14000:
+	case CPU_R16000:
+		pr_info("FrameMask: %0x\n", read_c0_framemask());
+		break;
+	}
+	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
+		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
+	if (cpu_has_htw) {
+		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
+		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
+		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
+	}
+}
+
 static inline const char *msk2str(unsigned int mask)
 {
 	switch (mask) {
@@ -28,83 +59,130 @@ static inline const char *msk2str(unsigned int mask)
 	case PM_8M:	return "8Mb";
 	case PM_32M:	return "32Mb";
 #endif
-#ifndef CONFIG_CPU_VR41XX
-	case PM_1M:	return "1Mb";
-	case PM_4M:	return "4Mb";
-	case PM_16M:	return "16Mb";
-	case PM_64M:	return "64Mb";
-	case PM_256M:	return "256Mb";
-	case PM_1G:	return "1Gb";
-#endif
 	}
 	return "";
 }
 
-#define BARRIER()					\
-	__asm__ __volatile__(				\
-		".set\tnoreorder\n\t"			\
-		"nop;nop;nop;nop;nop;nop;nop\n\t"	\
-		".set\treorder");
-
 static void dump_tlb(int first, int last)
 {
-	unsigned long s_entryhi, entryhi, asid;
-	unsigned long long entrylo0, entrylo1;
-	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
+	unsigned long s_entryhi, entryhi, asid, mmid;
+	unsigned long long entrylo0, entrylo1, pa;
+	unsigned int s_index, s_pagemask, s_guestctl1 = 0;
+	unsigned int pagemask, guestctl1 = 0, c0, c1, i;
+	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
+	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
+	unsigned long s_mmid;
+#ifdef CONFIG_32BIT
+	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
+	int pwidth = xpa ? 11 : 8;
+	int vwidth = 8;
+#else
+	bool xpa = false;
+	int pwidth = 11;
+	int vwidth = 11;
+#endif
 
 	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
-	asid = s_entryhi & 0xff;
+
+	if (cpu_has_mmid)
+		asid = s_mmid = read_c0_memorymapid();
+	else
+		asid = s_entryhi & asidmask;
+
+	if (cpu_has_guestid)
+		s_guestctl1 = read_c0_guestctl1();
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i);
-		BARRIER();
+		mtc0_tlbr_hazard();
 		tlb_read();
-		BARRIER();
+		tlb_read_hazard();
 		pagemask = read_c0_pagemask();
 		entryhi	 = read_c0_entryhi();
 		entrylo0 = read_c0_entrylo0();
 		entrylo1 = read_c0_entrylo1();
 
-		/* Unused entries have a virtual address of CKSEG0.  */
-		if ((entryhi & ~0x1ffffUL) != CKSEG0
-		    && (entryhi & 0xff) == asid) {
-#ifdef CONFIG_32BIT
-			int width = 8;
-#else
-			int width = 11;
-#endif
-			/*
-			 * Only print entries in use
-			 */
-			printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
-
-			c0 = (entrylo0 >> 3) & 7;
-			c1 = (entrylo1 >> 3) & 7;
-
-			printk("va=%0*lx asid=%02lx\n",
-			       width, (entryhi & ~0x1fffUL),
-			       entryhi & 0xff);
-			printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
-			       width,
-			       (entrylo0 << 6) & PAGE_MASK, c0,
-			       (entrylo0 & 4) ? 1 : 0,
-			       (entrylo0 & 2) ? 1 : 0,
-			       (entrylo0 & 1) ? 1 : 0);
-			printk("[pa=%0*llx c=%d d=%d v=%d g=%d]\n",
-			       width,
-			       (entrylo1 << 6) & PAGE_MASK, c1,
-			       (entrylo1 & 4) ? 1 : 0,
-			       (entrylo1 & 2) ? 1 : 0,
-			       (entrylo1 & 1) ? 1 : 0);
-		}
+		if (cpu_has_mmid)
+			mmid = read_c0_memorymapid();
+		else
+			mmid = entryhi & asidmask;
+
+		if (cpu_has_guestid)
+			guestctl1 = read_c0_guestctl1();
+
+		/* EHINV bit marks entire entry as invalid */
+		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
+			continue;
+		/*
+		 * Prior to tlbinv, unused entries have a virtual address of
+		 * CKSEG0.
+		 */
+		if ((entryhi & ~0x1ffffUL) == CKSEG0)
+			continue;
+		/*
+		 * ASID takes effect in absence of G (global) bit.
+		 * We check both G bits, even though architecturally they should
+		 * match one another, because some revisions of the SB1 core may
+		 * leave only a single G bit set after a machine check exception
+		 * due to duplicate TLB entry.
+		 */
+		if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
+			continue;
+
+		/*
+		 * Only print entries in use
+		 */
+		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
+
+		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+
+		pr_cont("va=%0*lx asid=%0*lx",
+			vwidth, (entryhi & ~0x1fffUL),
+			asidwidth, mmid);
+		if (cpu_has_guestid)
+			pr_cont(" gid=%02lx",
+				(guestctl1 & MIPS_GCTL1_RID)
+					>> MIPS_GCTL1_RID_SHIFT);
+		/* RI/XI are in awkward places, so mask them off separately */
+		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+		if (xpa)
+			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
+		pa = (pa << 6) & PAGE_MASK;
+		pr_cont("\n\t[");
+		if (cpu_has_rixi)
+			pr_cont("ri=%d xi=%d ",
+				(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+				(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+			pwidth, pa, c0,
+			(entrylo0 & ENTRYLO_D) ? 1 : 0,
+			(entrylo0 & ENTRYLO_V) ? 1 : 0,
+			(entrylo0 & ENTRYLO_G) ? 1 : 0);
+		/* RI/XI are in awkward places, so mask them off separately */
+		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+		if (xpa)
+			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
+		pa = (pa << 6) & PAGE_MASK;
+		if (cpu_has_rixi)
+			pr_cont("ri=%d xi=%d ",
+				(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+				(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+			pwidth, pa, c1,
+			(entrylo1 & ENTRYLO_D) ? 1 : 0,
+			(entrylo1 & ENTRYLO_V) ? 1 : 0,
+			(entrylo1 & ENTRYLO_G) ? 1 : 0);
 	}
 	printk("\n");
 
 	write_c0_entryhi(s_entryhi);
 	write_c0_index(s_index);
 	write_c0_pagemask(s_pagemask);
+	if (cpu_has_guestid)
+		write_c0_guestctl1(s_guestctl1);
 }
 
 void dump_tlb_all(void)
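The deleted open-coded decode above fixes the classic EntryLo layout: G in bit 0, V in bit 1, D in bit 2, the cache-coherency attribute in bits 3..5, and the physical address recovered as (entrylo << 6) & PAGE_MASK; the replacement reads the same fields through the ENTRYLO_* masks and handles the relocated RI/XI bits separately. A user-space decoder sketch of those classic positions (a 4 KiB PAGE_MASK stand-in is assumed, and the sample value is made up):

    #include <stdio.h>

    /* Decode the classic EntryLo low bits, mirroring the removed printk()s. */
    static void decode_entrylo_sketch(unsigned long long lo)
    {
            unsigned int g = lo & 1, v = (lo >> 1) & 1, d = (lo >> 2) & 1;
            unsigned int c = (lo >> 3) & 7;
            unsigned long long pa = (lo << 6) & ~0xfffULL;  /* 4K PAGE_MASK */

            printf("pa=%011llx c=%u d=%u v=%u g=%u\n", pa, c, d, v, g);
    }

    int main(void)
    {
            decode_entrylo_sketch(0x400017);        /* illustrative value */
            return 0;
    }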
" "Trying to continue but you better\nfix this issue or " - "report it to linux-mips@linux-mips.org or your " + "report it to linux-mips@vger.kernel.org or your " "vendor.\n", name); #ifdef CONFIG_PCI_DOMAINS panic("To avoid data corruption io_map_base MUST be set with " @@ -40,9 +43,14 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev, return (void __iomem *) (ctrl->io_map_base + port); } -void pci_iounmap(struct pci_dev *dev, void __iomem * addr) +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) { - iounmap(addr); -} + struct pci_controller *ctrl = dev->bus->sysdata; + void __iomem *base = (void __iomem *)ctrl->io_map_base; + if (addr < base || addr > (base + resource_size(ctrl->io_resource))) + iounmap(addr); +} EXPORT_SYMBOL(pci_iounmap); + +#endif /* CONFIG_PCI_DRIVERS_LEGACY */ diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c deleted file mode 100644 index e3acb2dad33a..000000000000 --- a/arch/mips/lib/iomap.c +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Implement the default iomap interfaces - * - * (C) Copyright 2004 Linus Torvalds - * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org> - * (C) Copyright 2007 MIPS Technologies, Inc. - * written by Ralf Baechle <ralf@linux-mips.org> - */ -#include <linux/module.h> -#include <asm/io.h> - -/* - * Read/write from/to an (offsettable) iomem cookie. It might be a PIO - * access or a MMIO access, these functions don't care. The info is - * encoded in the hardware mapping set up by the mapping functions - * (or the cookie itself, depending on implementation and hw). - * - * The generic routines don't assume any hardware mappings, and just - * encode the PIO/MMIO as part of the cookie. They coldly assume that - * the MMIO IO mappings are not in the low address range. - * - * Architectures for which this is not true can't use this generic - * implementation and should do their own copy. - */ - -#define PIO_MASK 0x0ffffUL - -unsigned int ioread8(void __iomem *addr) -{ - return readb(addr); -} - -EXPORT_SYMBOL(ioread8); - -unsigned int ioread16(void __iomem *addr) -{ - return readw(addr); -} - -EXPORT_SYMBOL(ioread16); - -unsigned int ioread16be(void __iomem *addr) -{ - return be16_to_cpu(__raw_readw(addr)); -} - -EXPORT_SYMBOL(ioread16be); - -unsigned int ioread32(void __iomem *addr) -{ - return readl(addr); -} - -EXPORT_SYMBOL(ioread32); - -unsigned int ioread32be(void __iomem *addr) -{ - return be32_to_cpu(__raw_readl(addr)); -} - -EXPORT_SYMBOL(ioread32be); - -void iowrite8(u8 val, void __iomem *addr) -{ - writeb(val, addr); -} - -EXPORT_SYMBOL(iowrite8); - -void iowrite16(u16 val, void __iomem *addr) -{ - writew(val, addr); -} - -EXPORT_SYMBOL(iowrite16); - -void iowrite16be(u16 val, void __iomem *addr) -{ - __raw_writew(cpu_to_be16(val), addr); -} - -EXPORT_SYMBOL(iowrite16be); - -void iowrite32(u32 val, void __iomem *addr) -{ - writel(val, addr); -} - -EXPORT_SYMBOL(iowrite32); - -void iowrite32be(u32 val, void __iomem *addr) -{ - __raw_writel(cpu_to_be32(val), addr); -} - -EXPORT_SYMBOL(iowrite32be); - -/* - * These are the "repeat MMIO read/write" functions. - * Note the "__raw" accesses, since we don't want to - * convert to CPU byte order. We write in "IO byte - * order" (we also don't have IO barriers). 
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
deleted file mode 100644
index e3acb2dad33a..000000000000
--- a/arch/mips/lib/iomap.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Implement the default iomap interfaces
- *
- * (C) Copyright 2004 Linus Torvalds
- * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org>
- * (C) Copyright 2007 MIPS Technologies, Inc.
- *     written by Ralf Baechle <ralf@linux-mips.org>
- */
-#include <linux/module.h>
-#include <asm/io.h>
-
-/*
- * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
- * access or a MMIO access, these functions don't care. The info is
- * encoded in the hardware mapping set up by the mapping functions
- * (or the cookie itself, depending on implementation and hw).
- *
- * The generic routines don't assume any hardware mappings, and just
- * encode the PIO/MMIO as part of the cookie. They coldly assume that
- * the MMIO IO mappings are not in the low address range.
- *
- * Architectures for which this is not true can't use this generic
- * implementation and should do their own copy.
- */
-
-#define PIO_MASK	0x0ffffUL
-
-unsigned int ioread8(void __iomem *addr)
-{
-	return readb(addr);
-}
-
-EXPORT_SYMBOL(ioread8);
-
-unsigned int ioread16(void __iomem *addr)
-{
-	return readw(addr);
-}
-
-EXPORT_SYMBOL(ioread16);
-
-unsigned int ioread16be(void __iomem *addr)
-{
-	return be16_to_cpu(__raw_readw(addr));
-}
-
-EXPORT_SYMBOL(ioread16be);
-
-unsigned int ioread32(void __iomem *addr)
-{
-	return readl(addr);
-}
-
-EXPORT_SYMBOL(ioread32);
-
-unsigned int ioread32be(void __iomem *addr)
-{
-	return be32_to_cpu(__raw_readl(addr));
-}
-
-EXPORT_SYMBOL(ioread32be);
-
-void iowrite8(u8 val, void __iomem *addr)
-{
-	writeb(val, addr);
-}
-
-EXPORT_SYMBOL(iowrite8);
-
-void iowrite16(u16 val, void __iomem *addr)
-{
-	writew(val, addr);
-}
-
-EXPORT_SYMBOL(iowrite16);
-
-void iowrite16be(u16 val, void __iomem *addr)
-{
-	__raw_writew(cpu_to_be16(val), addr);
-}
-
-EXPORT_SYMBOL(iowrite16be);
-
-void iowrite32(u32 val, void __iomem *addr)
-{
-	writel(val, addr);
-}
-
-EXPORT_SYMBOL(iowrite32);
-
-void iowrite32be(u32 val, void __iomem *addr)
-{
-	__raw_writel(cpu_to_be32(val), addr);
-}
-
-EXPORT_SYMBOL(iowrite32be);
-
-/*
- * These are the "repeat MMIO read/write" functions.
- * Note the "__raw" accesses, since we don't want to
- * convert to CPU byte order. We write in "IO byte
- * order" (we also don't have IO barriers).
- */
-static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
-{
-	while (--count >= 0) {
-		u8 data = __raw_readb(addr);
-		*dst = data;
-		dst++;
-	}
-}
-
-static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
-{
-	while (--count >= 0) {
-		u16 data = __raw_readw(addr);
-		*dst = data;
-		dst++;
-	}
-}
-
-static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
-{
-	while (--count >= 0) {
-		u32 data = __raw_readl(addr);
-		*dst = data;
-		dst++;
-	}
-}
-
-static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
-{
-	while (--count >= 0) {
-		__raw_writeb(*src, addr);
-		src++;
-	}
-}
-
-static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
-{
-	while (--count >= 0) {
-		__raw_writew(*src, addr);
-		src++;
-	}
-}
-
-static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
-{
-	while (--count >= 0) {
-		__raw_writel(*src, addr);
-		src++;
-	}
-}
-
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
-{
-	mmio_insb(addr, dst, count);
-}
-
-EXPORT_SYMBOL(ioread8_rep);
-
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
-{
-	mmio_insw(addr, dst, count);
-}
-
-EXPORT_SYMBOL(ioread16_rep);
-
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
-{
-	mmio_insl(addr, dst, count);
-}
-
-EXPORT_SYMBOL(ioread32_rep);
-
-void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
-{
-	mmio_outsb(addr, src, count);
-}
-
-EXPORT_SYMBOL(iowrite8_rep);
-
-void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
-{
-	mmio_outsw(addr, src, count);
-}
-
-EXPORT_SYMBOL(iowrite16_rep);
-
-void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
-{
-	mmio_outsl(addr, src, count);
-}
-
-EXPORT_SYMBOL(iowrite32_rep);
-
-/*
- * Create a virtual mapping cookie for an IO port range
- *
- * This uses the same mapping are as the in/out family which has to be setup
- * by the platform initialization code.
- *
- * Just to make matters somewhat more interesting on MIPS systems with
- * multiple host bridge each will have it's own ioport address space.
- */
-static void __iomem *ioport_map_legacy(unsigned long port, unsigned int nr)
-{
-	return (void __iomem *) (mips_io_port_base + port);
-}
-
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
-	if (port > PIO_MASK)
-		return NULL;
-
-	return ioport_map_legacy(port, nr);
-}
-
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
-	/* Nothing to do */
-}
-
-EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/mips/lib/iomap_copy.c b/arch/mips/lib/iomap_copy.c
new file mode 100644
index 000000000000..157500a09a48
--- /dev/null
+++ b/arch/mips/lib/iomap_copy.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/io.h>
+
+/**
+ * __ioread64_copy - copy data from MMIO space, in 64-bit units
+ * @to: destination (must be 64-bit aligned)
+ * @from: source, in MMIO space (must be 64-bit aligned)
+ * @count: number of 64-bit quantities to copy
+ *
+ * Copy data from MMIO space to kernel space, in units of 32 or 64 bits at a
+ * time.  Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __ioread64_copy(void *to, const void __iomem *from, size_t count)
+{
+#ifdef CONFIG_64BIT
+	u64 *dst = to;
+	const u64 __iomem *src = from;
+	const u64 __iomem *end = src + count;
+
+	while (src < end)
+		*dst++ = __raw_readq(src++);
+#else
+	__ioread32_copy(to, from, count * 2);
+#endif
+}
+EXPORT_SYMBOL_GPL(__ioread64_copy);
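Note that count is in 64-bit units, which is why the 32-bit fallback passes count * 2 to __ioread32_copy(). A user-space model of the 64-bit path, with a volatile buffer standing in for real MMIO (and, like the kernel-doc above says, no barriers):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Model of __ioread64_copy(): a plain volatile read stands in for
     * __raw_readq(); 'count' is in 64-bit quantities. */
    static void ioread64_copy_model(void *to, const volatile uint64_t *from,
                                    size_t count)
    {
            uint64_t *dst = to;

            while (count--)
                    *dst++ = *from++;
    }

    int main(void)
    {
            volatile uint64_t mmio[2] = { 0x1122334455667788ull, 42 };
            uint64_t buf[2];

            ioread64_copy_model(buf, mmio, 2);
            assert(buf[0] == 0x1122334455667788ull && buf[1] == 42);
            return 0;
    }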
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 05909d58e2fe..199a7f96282f 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __ASM_LIBGCC_H
 #define __ASM_LIBGCC_H
 
@@ -9,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
 struct DWstruct {
 	int high, low;
 };
+
+struct TWstruct {
+	long long high, low;
+};
 #elif defined(__LITTLE_ENDIAN)
 struct DWstruct {
 	int low, high;
 };
+
+struct TWstruct {
+	long long low, high;
+};
 #else
 #error I feel sick.
 #endif
@@ -22,4 +31,13 @@ typedef union {
 	long long ll;
 } DWunion;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
+typedef int ti_type __attribute__((mode(TI)));
+
+typedef union {
+	struct TWstruct s;
+	ti_type ti;
+} TWunion;
+#endif
+
 #endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
deleted file mode 100644
index dcf8d6810b7c..000000000000
--- a/arch/mips/lib/lshrdi3.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/module.h>
-
-#include "libgcc.h"
-
-long long __lshrdi3(long long u, word_type b)
-{
-	DWunion uu, w;
-	word_type bm;
-
-	if (b == 0)
-		return u;
-
-	uu.ll = u;
-	bm = 32 - b;
-
-	if (bm <= 0) {
-		w.s.high = 0;
-		w.s.low = (unsigned int) uu.s.high >> -bm;
-	} else {
-		const unsigned int carries = (unsigned int) uu.s.high << bm;
-
-		w.s.high = (unsigned int) uu.s.high >> b;
-		w.s.low = ((unsigned int) uu.s.low >> b) | carries;
-	}
-
-	return w.ll;
-}
-
-EXPORT_SYMBOL(__lshrdi3);
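The new TWstruct/TWunion pair gives the multi3.c added by this commit a way to view a 128-bit TI-mode value as two 64-bit halves. The identity behind such a helper is the schoolbook split (hi*2^N + lo) * (hi'*2^N + lo'); as a sketch, here is the same trick one level down — 32-bit halves, 64-bit product — so it runs on any host:

    #include <assert.h>
    #include <stdint.h>

    /* Schoolbook double-word multiply. Modulo 2^64, the ahi*bhi term
     * vanishes and the cross terms only affect the high half. */
    static uint64_t mul64_sketch(uint64_t a, uint64_t b)
    {
            uint32_t alo = a, ahi = a >> 32, blo = b, bhi = b >> 32;
            uint64_t lo = (uint64_t)alo * blo;
            uint64_t hi = (uint64_t)ahi * blo + (uint64_t)alo * bhi;

            return lo + (hi << 32);
    }

    int main(void)
    {
            uint64_t a = 0x123456789abcdef0ull, b = 0xfedcba9876543210ull;

            assert(mul64_sketch(a, b) == a * b);
            return 0;
    }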
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c5c40dad0bbf..a4b4e805ff13 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -10,6 +10,7 @@
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */
@@ -27,7 +28,11 @@
 #ifdef CONFIG_MIPS_MALTA
 #undef CONFIG_CPU_HAS_PREFETCH
 #endif
+#ifdef CONFIG_CPU_MIPSR6
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
 
+#include <linux/export.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/regdef.h>
@@ -85,11 +90,51 @@
 * they're not protected.
 */
 
-#define EXC(inst_reg,addr,handler)		\
-9:	inst_reg, addr;				\
-	.section __ex_table,"a";		\
-	PTR	9b, handler;			\
-	.previous
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+/* Pretech type */
+#define SRC_PREFETCH 1
+#define DST_PREFETCH 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+
+#define EXC(insn, type, reg, addr, handler)			\
+	.if \mode == LEGACY_MODE;				\
+9:		insn reg, addr;					\
+		.section __ex_table,"a";			\
+		PTR_WD	9b, handler;				\
+		.previous;					\
+	/* This is assembled in EVA mode */			\
+	.else;							\
+		/* If loading from user or storing to user */	\
+		.if ((\from == USEROP) && (type == LD_INSN)) || \
+		    ((\to == USEROP) && (type == ST_INSN));	\
+9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
+			.section __ex_table,"a";		\
+			PTR_WD	9b, handler;			\
+			.previous;				\
+		.else;						\
+			/*					\
+			 *  Still in EVA, but no need for	\
+			 * exception handler or EVA insn	\
+			 */					\
+			insn reg, addr;				\
+		.endif;						\
+	.endif
 
 /*
 * Only on the 64-bit kernel we can made use of 64-bit registers.
@@ -100,12 +145,13 @@
 
 #ifdef USE_DOUBLE
 
-#define LOAD   ld
-#define LOADL  ldl
-#define LOADR  ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE  sd
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
 #define ADD    daddu
 #define SUB    dsubu
 #define SRL    dsrl
@@ -136,12 +182,13 @@
 
 #else
 
-#define LOAD   lw
-#define LOADL  lwl
-#define LOADR  lwr
-#define STOREL swl
-#define STORER swr
-#define STORE  sw
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
 #define ADD    addu
 #define SUB    subu
 #define SRL    srl
@@ -154,6 +201,37 @@
 
 #endif /* USE_DOUBLE */
 
+#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
+
+#ifdef CONFIG_CPU_HAS_PREFETCH
+# define _PREF(hint, addr, type)					\
+	.if \mode == LEGACY_MODE;					\
+		kernel_pref(hint, addr);				\
+	.else;								\
+		.if ((\from == USEROP) && (type == SRC_PREFETCH)) ||	\
+		    ((\to == USEROP) && (type == DST_PREFETCH));	\
+			/*						\
+			 * PREFE has only 9 bits for the offset		\
+			 * compared to PREF which has 16, so it may	\
+			 * need to use the $at register but this	\
+			 * register should remain intact because it's	\
+			 * used later on. Therefore use $v1.		\
+			 */						\
+			.set	at=v1;					\
+			user_pref(hint, addr);				\
+			.set	noat;					\
+		.else;							\
+			kernel_pref(hint, addr);			\
+		.endif;							\
+	.endif
+#else
+# define _PREF(hint, addr, type)
+#endif
+
+#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
+#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
+
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define LDFIRST LOADR
 #define LDREST	LOADL
@@ -182,27 +260,23 @@
 	.set	at=v1
 #endif
 
-/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-	b	__copy_user_common
-	 li	t6, 1
-	END(__copy_user_inatomic)
-
-/*
- * A combined memcpy/__copy_user
- * __copy_user sets len to 0 for success; else to an upper bound of
- * the number of uncopied bytes.
- * memcpy sets v0 to dst.
- */
 	.align	5
-LEAF(memcpy)					/* a0=dst a1=src a2=len */
-	move	v0, dst				/* return value */
-.L__memcpy:
-FEXPORT(__copy_user)
-	li	t6, 0	/* not inatomic */
-__copy_user_common:
+
+	/*
+	 * Macro to build the __copy_user common code
+	 * Arguments:
+	 * mode : LEGACY_MODE or EVA_MODE
+	 * from : Source operand. USEROP or KERNELOP
+	 * to   : Destination operand. USEROP or KERNELOP
+	 */
+	.macro __BUILD_COPY_USER mode, from, to
+
+	/* initialize __memcpy if this the first time we execute this macro */
+	.ifnotdef __memcpy
+	.set __memcpy, 1
+	.hidden __memcpy /* make sure it does not leak */
+	.endif
+
 	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
@@ -217,99 +291,105 @@ __copy_user_common:
	 *
	 * If len < NBYTES use byte operations.
	 */
-	PREF(	0, 0(src) )
-	PREF(	1, 0(dst) )
+	PREFS(	0, 0(src) )
+	PREFD(	1, 0(dst) )
 	sltu	t2, len, NBYTES
 	and	t1, dst, ADDRMASK
-	PREF(	0, 1*32(src) )
-	PREF(	1, 1*32(dst) )
-	bnez	t2, .Lcopy_bytes_checklen
+	PREFS(	0, 1*32(src) )
+	PREFD(	1, 1*32(dst) )
+	bnez	t2, .Lcopy_bytes_checklen\@
 	 and	t0, src, ADDRMASK
-	PREF(	0, 2*32(src) )
-	PREF(	1, 2*32(dst) )
-	bnez	t1, .Ldst_unaligned
+	PREFS(	0, 2*32(src) )
+	PREFD(	1, 2*32(dst) )
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+	bnez	t1, .Ldst_unaligned\@
 	 nop
-	bnez	t0, .Lsrc_unaligned_dst_aligned
+	bnez	t0, .Lsrc_unaligned_dst_aligned\@
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+	or	t0, t0, t1
+	bnez	t0, .Lcopy_unaligned_bytes\@
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
-.Lboth_aligned:
+.Lboth_aligned\@:
 	 SRL	t0, len, LOG_NBYTES+3	 # +3 for 8 units/iter
-	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
+	beqz	t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
 	 and	rem, len, (8*NBYTES-1)	 # rem = len % (8*NBYTES)
-	PREF(	0, 3*32(src) )
-	PREF(	1, 3*32(dst) )
+	PREFS(	0, 3*32(src) )
+	PREFD(	1, 3*32(dst) )
 	.align	4
1:
 	R10KCBARRIER(0(ra))
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
+	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
 	SUB	len, len, 8*NBYTES
-EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
-EXC(	LOAD	t7, UNIT(5)(src),	.Ll_exc_copy)
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p8u)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p7u)
-EXC(	LOAD	t0, UNIT(6)(src),	.Ll_exc_copy)
-EXC(	LOAD	t1, UNIT(7)(src),	.Ll_exc_copy)
+	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+	LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p8u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p7u\@)
+	LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
+	LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
 	ADD	src, src, 8*NBYTES
 	ADD	dst, dst, 8*NBYTES
-EXC(	STORE	t2, UNIT(-6)(dst),	.Ls_exc_p6u)
-EXC(	STORE	t3, UNIT(-5)(dst),	.Ls_exc_p5u)
-EXC(	STORE	t4, UNIT(-4)(dst),	.Ls_exc_p4u)
-EXC(	STORE	t7, UNIT(-3)(dst),	.Ls_exc_p3u)
-EXC(	STORE	t0, UNIT(-2)(dst),	.Ls_exc_p2u)
-EXC(	STORE	t1, UNIT(-1)(dst),	.Ls_exc_p1u)
-	PREF(	0, 8*32(src) )
-	PREF(	1, 8*32(dst) )
+	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
+	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
+	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
+	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
+	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
+	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
+	PREFS(	0, 8*32(src) )
+	PREFD(	1, 8*32(dst) )
 	bne	len, rem, 1b
 	 nop
 
 	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
-.Lcleanup_both_aligned:
-	beqz	len, .Ldone
+.Lcleanup_both_aligned\@:
+	beqz	len, .Ldone\@
 	 sltu	t0, len, 4*NBYTES
-	bnez	t0, .Lless_than_4units
+	bnez	t0, .Lless_than_4units\@
 	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
 	/*
	 * len >= 4*NBYTES
	 */
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
+	LOAD( t0, UNIT(0)(src),	.Ll_exc\@)
+	LOAD( t1, UNIT(1)(src),	.Ll_exc_copy\@)
+	LOAD( t2, UNIT(2)(src),	.Ll_exc_copy\@)
+	LOAD( t3, UNIT(3)(src),	.Ll_exc_copy\@)
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
 	R10KCBARRIER(0(ra))
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p4u)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p3u)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc_p2u)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc_p1u)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p4u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p3u\@)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc_p2u\@)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc_p1u\@)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	.set	noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
 	/*
	 * rem = len % NBYTES
	 */
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
1:
 	R10KCBARRIER(0(ra))
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src), .Ll_exc\@)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
+	STORE(t0, 0(dst), .Ls_exc_p1u\@)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
 	bne	rem, len, 1b
 	.set	noreorder
 
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
@@ -322,17 +402,17 @@ EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
	 * more instruction-level parallelism.
	 */
 #define bits t2
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	 ADD	t1, dst, len	# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3	# rem = number of bits to keep
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src), .Ll_exc\@)
 	SUB	bits, bits, rem	# bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
-EXC(	STREST	t0, -1(t1),		.Ls_exc)
+	STREST(t0, -1(t1), .Ls_exc\@)
 	jr	ra
 	 move	len, zero
-.Ldst_unaligned:
+.Ldst_unaligned\@:
 	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
@@ -343,25 +423,25 @@ EXC(	STREST	t0, -1(t1),		.Ls_exc)
	 * Set match = (src and dst have same alignment)
	 */
 #define match rem
-EXC(	LDFIRST t3, FIRST(0)(src),	.Ll_exc)
+	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
 	ADD	t2, zero, NBYTES
-EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
+	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
 	SUB	t2, t2, t1	# t2 = number of bytes copied
 	xor	match, t0, t1
 	R10KCBARRIER(0(ra))
-EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
-	beq	len, t2, .Ldone
+	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+	beq	len, t2, .Ldone\@
 	 SUB	len, len, t2
 	ADD	dst, dst, t2
-	beqz	match, .Lboth_aligned
+	beqz	match, .Lboth_aligned\@
 	 ADD	src, src, t2
 
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
 	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
-	PREF(	0, 3*32(src) )
-	beqz	t0, .Lcleanup_src_unaligned
+	PREFS(	0, 3*32(src) )
+	beqz	t0, .Lcleanup_src_unaligned\@
 	 and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
-	PREF(	1, 3*32(dst) )
+	PREFD(	1, 3*32(dst) )
1:
	/*
	 * Avoid consecutive LD*'s to the same register since some mips
	 * implementations can't issue them in the same cycle.
	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
	 * are to the same unit (unless src is aligned, but it's not).
	 */
 	R10KCBARRIER(0(ra))
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDFIRST t1, FIRST(1)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
 	SUB	len, len, 4*NBYTES
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
-EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t2, FIRST(2)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t3, FIRST(3)(src),	.Ll_exc_copy)
-EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
-EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
-	PREF(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
 	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	 nop				# improves slotting
#endif
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p4u)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p3u)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc_p2u)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc_p1u)
-	PREF(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p4u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p3u\@)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc_p2u\@)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc_p1u\@)
+	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
 
-.Lcleanup_src_unaligned:
-	beqz	len, .Ldone
+.Lcleanup_src_unaligned\@:
+	beqz	len, .Ldone\@
 	 and	rem, len, NBYTES-1  # rem = len % NBYTES
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
1:
 	R10KCBARRIER(0(ra))
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
+	STORE(t0, 0(dst), .Ls_exc_p1u\@)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
 
+#endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
.Lcopy_bytes_checklen\@:
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	 nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
 	/* 0 < len < NBYTES  */
 	R10KCBARRIER(0(ra))
#define COPY_BYTE(N)			\
-EXC(	lb	t0, N(src), .Ll_exc);	\
+	LOADB(t0, N(src), .Ll_exc\@);	\
 	SUB	len, len, 1;		\
-	beqz	len, .Ldone;		\
-EXC(	 sb	t0, N(dst), .Ls_exc_p1)
+	beqz	len, .Ldone\@;		\
+	STOREB(t0, N(dst), .Ls_exc_p1\@)
 
 	COPY_BYTE(0)
 	COPY_BYTE(1)
@@ -431,16 +512,36 @@ EXC(	 sb	t0, N(dst), .Ls_exc_p1)
 	COPY_BYTE(4)
 	COPY_BYTE(5)
#endif
-EXC(	lb	t0, NBYTES-2(src), .Ll_exc)
+	LOADB(t0, NBYTES-2(src), .Ll_exc\@)
 	SUB	len, len, 1
 	jr	ra
-EXC(	 sb	t0, NBYTES-2(dst), .Ls_exc_p1)
-.Ldone:
+	STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
+.Ldone\@:
 	jr	ra
 	 nop
+
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
+.Lcopy_unaligned_bytes\@:
+1:
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+	COPY_BYTE(6)
+	COPY_BYTE(7)
+	ADD	src, src, 8
+	b	1b
+	 ADD	dst, dst, 8
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 	.if __memcpy == 1
 	END(memcpy)
+	.set __memcpy, 0
+	.hidden __memcpy
+	.endif
 
-.Ll_exc_copy:
+.Ll_exc_copy\@:
 	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
-	LOAD	t0, TI_TASK($28)
+	LOADK	t0, TI_TASK($28)
	 nop
-	LOAD	t0, THREAD_BUADDR(t0)
+	LOADK	t0, THREAD_BUADDR(t0)
1:
-EXC(	lb	t1, 0(src),	.Ll_exc)
+	LOADB(t1, 0(src), .Ll_exc\@)
 	ADD	src, src, 1
 	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 1
 	bne	src, t0, 1b
 	.set	noreorder
-.Ll_exc:
-	LOAD	t0, TI_TASK($28)
+.Ll_exc\@:
+	LOADK	t0, TI_TASK($28)
	 nop
-	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	 nop
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t6, .Ldone	/* Skip the zeroing part if inatomic */
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *  See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	.set	reorder				/* DADDI_WAR */
-	SUB	src, len, 1
-	beqz	len, .Ldone
-	.set	noreorder
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-	bnez	src, 1b
-	 SUB	src, src, 1
-#else
-	.set	push
-	.set	noat
-	li	v1, 1
-	bnez	src, 1b
-	 SUB	src, src, v1
-	.set	pop
-#endif
 	jr	ra
 	 nop
 
-
#define SEXC(n)							\
 	.set	reorder;			/* DADDI_WAR */ \
-.Ls_exc_p ## n ## u:						\
+.Ls_exc_p ## n ## u\@:						\
 	ADD	len, len, n*NBYTES;				\
 	jr	ra;						\
 	.set	noreorder
@@ -517,17 +588,20 @@ SEXC(3)
SEXC(2)
SEXC(1)
 
-.Ls_exc_p1:
+.Ls_exc_p1\@:
 	.set	reorder				/* DADDI_WAR */
 	ADD	len, len, 1
 	jr	ra
 	.set	noreorder
-.Ls_exc:
+.Ls_exc\@:
 	jr	ra
 	 nop
+	.endm
 
+#ifndef CONFIG_HAVE_PLAT_MEMCPY
 	.align	5
LEAF(memmove)
+EXPORT_SYMBOL(memmove)
 	ADD	t0, a0, a2
 	ADD	t1, a1, a2
 	sltu	t0, a1, t0			# dst + len <= src -> memcpy
@@ -575,3 +649,56 @@ LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
 	jr	ra
 	 move	a2, zero
 	END(__rmemcpy)
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+	.align	5
+LEAF(memcpy)					/* a0=dst a1=src a2=len */
+EXPORT_SYMBOL(memcpy)
+	move	v0, dst				/* return value */
+.L__memcpy:
+#ifndef CONFIG_EVA
+FEXPORT(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
+FEXPORT(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
+#endif
+	/* Legacy Mode, user <-> user */
+	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+
+#endif
+
+#ifdef CONFIG_EVA
+
+/*
+ * For EVA we need distinct symbols for reading and writing to user space.
+ * This is because we need to use specific EVA instructions to perform the
+ * virtual <-> physical translation when a virtual address is actually in user
+ * space
+ */
+
+/*
+ * __copy_from_user (EVA)
+ */
+
+LEAF(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
+	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
+END(__raw_copy_from_user)
+
+
+
+/*
+ * __copy_to_user (EVA)
+ */
+
+LEAF(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
+__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
+END(__raw_copy_to_user)
+
+#endif
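Each LOAD()/STORE() above expands through EXC() into the instruction plus a __ex_table entry pairing the possibly-faulting address with a fixup label, and the contract stated in the comment block is that __copy_user leaves len at 0 on success, or an upper bound on the uncopied bytes otherwise. A C model of just that contract, with the fault injected artificially (all names here are illustrative):

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    /* Model of the __copy_user contract built by __BUILD_COPY_USER: copy
     * until a (simulated) fault and report how many bytes were left. In the
     * real code the exception table routes the faulting load/store to a
     * handler that computes exactly this residue. */
    static size_t copy_user_model(char *dst, const char *src, size_t len,
                                  size_t fault_at /* simulated bad offset */)
    {
            size_t copied = len < fault_at ? len : fault_at;

            memcpy(dst, src, copied);
            return len - copied;    /* 0 on success, bytes left on fault */
    }

    int main(void)
    {
            char dst[8];

            assert(copy_user_model(dst, "abcdefgh", 8, 100) == 0);
            assert(copy_user_model(dst, "abcdefgh", 8, 3) == 5);
            return 0;
    }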
.Ll_exc\@) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user .set reorder /* DADDI_WAR */ ADD dst, dst, 1 bne src, t0, 1b .set noreorder -.Ll_exc: - LOAD t0, TI_TASK($28) +.Ll_exc\@: + LOADK t0, TI_TASK($28) nop - LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address + LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes - bnez t6, .Ldone /* Skip the zeroing part if inatomic */ - /* - * Here's where we rely on src and dst being incremented in tandem, - * See (3) above. - * dst += (fault addr - src) to put dst at first byte to clear - */ - ADD dst, t0 # compute start address in a1 - SUB dst, src - /* - * Clear len bytes starting at dst. Can't call __bzero because it - * might modify len. An inefficient loop for these rare times... - */ - .set reorder /* DADDI_WAR */ - SUB src, len, 1 - beqz len, .Ldone - .set noreorder -1: sb zero, 0(dst) - ADD dst, dst, 1 -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS - bnez src, 1b - SUB src, src, 1 -#else - .set push - .set noat - li v1, 1 - bnez src, 1b - SUB src, src, v1 - .set pop -#endif jr ra nop - #define SEXC(n) \ .set reorder; /* DADDI_WAR */ \ -.Ls_exc_p ## n ## u: \ +.Ls_exc_p ## n ## u\@: \ ADD len, len, n*NBYTES; \ jr ra; \ .set noreorder @@ -517,17 +588,20 @@ SEXC(3) SEXC(2) SEXC(1) -.Ls_exc_p1: +.Ls_exc_p1\@: .set reorder /* DADDI_WAR */ ADD len, len, 1 jr ra .set noreorder -.Ls_exc: +.Ls_exc\@: jr ra nop + .endm +#ifndef CONFIG_HAVE_PLAT_MEMCPY .align 5 LEAF(memmove) +EXPORT_SYMBOL(memmove) ADD t0, a0, a2 ADD t1, a1, a2 sltu t0, a1, t0 # dst + len <= src -> memcpy @@ -575,3 +649,56 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ jr ra move a2, zero END(__rmemcpy) + +/* + * A combined memcpy/__copy_user + * __copy_user sets len to 0 for success; else to an upper bound of + * the number of uncopied bytes. + * memcpy sets v0 to dst. + */ + .align 5 +LEAF(memcpy) /* a0=dst a1=src a2=len */ +EXPORT_SYMBOL(memcpy) + move v0, dst /* return value */ +.L__memcpy: +#ifndef CONFIG_EVA +FEXPORT(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) +FEXPORT(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) +#endif + /* Legacy Mode, user <-> user */ + __BUILD_COPY_USER LEGACY_MODE USEROP USEROP + +#endif + +#ifdef CONFIG_EVA + +/* + * For EVA we need distinct symbols for reading and writing to user space. + * This is because we need to use specific EVA instructions to perform the + * virtual <-> physical translation when a virtual address is actually in user + * space + */ + +/* + * __copy_from_user (EVA) + */ + +LEAF(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) + __BUILD_COPY_USER EVA_MODE USEROP KERNELOP +END(__raw_copy_from_user) + + + +/* + * __copy_to_user (EVA) + */ + +LEAF(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) +__BUILD_COPY_USER EVA_MODE KERNELOP USEROP +END(__raw_copy_to_user) + +#endif diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index 0580194e7402..79405c32cc85 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -8,6 +8,7 @@ * Copyright (C) 2007 by Maciej W. Rozycki * Copyright (C) 2011, 2012 MIPS Technologies, Inc. */ +#include <linux/export.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> @@ -34,13 +35,27 @@ #define FILLPTRG t0 #endif +#define LEGACY_MODE 1 +#define EVA_MODE 2 + +/* + * No need to protect it with EVA #ifdefery. The generated block of code + * will never be assembled if EVA is not enabled. 
+ */ +#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr) +#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr) + #define EX(insn,reg,addr,handler) \ -9: insn reg, addr; \ + .if \mode == LEGACY_MODE; \ +9: insn reg, addr; \ + .else; \ +9: ___BUILD_EVA_INSN(insn, reg, addr); \ + .endif; \ .section __ex_table,"a"; \ - PTR 9b, handler; \ + PTR_WD 9b, handler; \ .previous - .macro f_fill64 dst, offset, val, fixup + .macro f_fill64 dst, offset, val, fixup, mode EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) @@ -63,39 +78,31 @@ #endif .endm -/* - * memset(void *s, int c, size_t n) - * - * a0: start of area to clear - * a1: char to fill with - * a2: size of area to clear - */ - .set noreorder .align 5 -LEAF(memset) - beqz a1, 1f - move v0, a0 /* result */ - andi a1, 0xff /* spread fillword */ - LONG_SLL t1, a1, 8 - or a1, t1 - LONG_SLL t1, a1, 16 -#if LONGSIZE == 8 - or a1, t1 - LONG_SLL t1, a1, 32 -#endif - or a1, t1 -1: + /* + * Macro to generate the __bzero{,_user} symbol + * Arguments: + * mode: LEGACY_MODE or EVA_MODE + */ + .macro __BUILD_BZERO mode + /* Initialize __memset if this is the first time we call this macro */ + .ifnotdef __memset + .set __memset, 1 + .hidden __memset /* Make sure it does not leak */ + .endif -FEXPORT(__bzero) sltiu t0, a2, STORSIZE /* very small region? */ - bnez t0, .Lsmall_memset + .set noreorder + bnez t0, .Lsmall_memset\@ andi t0, a0, STORMASK /* aligned? */ + .set reorder #ifdef CONFIG_CPU_MICROMIPS move t8, a1 /* used by 'swp' instruction */ move t9, a1 #endif + .set noreorder #ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f PTR_SUBU t0, STORSIZE /* alignment in bytes */ @@ -106,31 +113,56 @@ FEXPORT(__bzero) PTR_SUBU t0, AT /* alignment in bytes */ .set at #endif + .set reorder +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ - EX(LONG_S_L, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */ -#endif -#ifdef __MIPSEL__ - EX(LONG_S_R, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */ + EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ +#else + EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ #endif PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ +#define STORE_BYTE(N) \ + EX(sb, a1, N(a0), .Lbyte_fixup\@); \ + .set noreorder; \ + beqz t0, 0f; \ + PTR_ADDU t0, 1; \ + .set reorder; + + PTR_ADDU a2, t0 /* correct size */ + PTR_ADDU t0, 1 + STORE_BYTE(0) + STORE_BYTE(1) +#if LONGSIZE == 4 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) +#else + STORE_BYTE(2) + STORE_BYTE(3) + STORE_BYTE(4) + STORE_BYTE(5) + EX(sb, a1, 6(a0), .Lbyte_fixup\@) +#endif +0: + ori a0, STORMASK + xori a0, STORMASK + PTR_ADDIU a0, STORSIZE +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f - beqz t1, .Lmemset_partial /* no block to fill */ - andi t0, a2, 0x40-STORSIZE + andi t0, a2, 0x40-STORSIZE + beqz t1, .Lmemset_partial\@ /* no block to fill */ PTR_ADDU t1, a0 /* end address */ - .set reorder 1: PTR_ADDIU a0, 64 R10KCBARRIER(0(ra)) - f_fill64 a0, -64, FILL64RG, .Lfwd_fixup + f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode bne t1, a0, 1b - .set noreorder -.Lmemset_partial: +.Lmemset_partial\@: R10KCBARRIER(0(ra)) PTR_LA t1, 2f /* where to start */ #ifdef CONFIG_CPU_MICROMIPS @@ -144,61 +176,150 @@ FEXPORT(__bzero) PTR_SUBU t1, AT .set at #endif + PTR_ADDU 
a0, t0 /* dest ptr */ jr t1 - PTR_ADDU a0, t0 /* dest ptr */ - .set push - .set noreorder - .set nomacro - f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */ -2: .set pop - andi a2, STORMASK /* At most one long to go */ + /* ... but first do longs ... */ + f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode +2: andi a2, STORMASK /* At most one long to go */ + .set noreorder beqz a2, 1f +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR PTR_ADDU a0, a2 /* What's left */ + .set reorder R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ - EX(LONG_S_R, a1, -1(a0), .Llast_fixup) + EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) +#else + EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif -#ifdef __MIPSEL__ - EX(LONG_S_L, a1, -1(a0), .Llast_fixup) +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ + PTR_SUBU t0, $0, a2 + .set reorder + move a2, zero /* No remaining longs */ + PTR_ADDIU t0, 1 + STORE_BYTE(0) + STORE_BYTE(1) +#if LONGSIZE == 4 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) +#else + STORE_BYTE(2) + STORE_BYTE(3) + STORE_BYTE(4) + STORE_BYTE(5) + EX(sb, a1, 6(a0), .Lbyte_fixup\@) #endif -1: jr ra - move a2, zero +0: +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ +1: move a2, zero + jr ra -.Lsmall_memset: +.Lsmall_memset\@: + PTR_ADDU t1, a0, a2 beqz a2, 2f - PTR_ADDU t1, a0, a2 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) + .set noreorder bne t1, a0, 1b - sb a1, -1(a0) + EX(sb, a1, -1(a0), .Lsmall_fixup\@) + .set reorder -2: jr ra /* done */ - move a2, zero +2: move a2, zero + jr ra /* done */ + .if __memset == 1 END(memset) + .set __memset, 0 + .hidden __memset + .endif -.Lfirst_fixup: +#ifdef CONFIG_CPU_NO_LOAD_STORE_LR +.Lbyte_fixup\@: + /* + * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1 + * a2 = a2 - t0 + 1 + */ + PTR_SUBU a2, t0 + PTR_ADDIU a2, 1 + jr ra +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ + +.Lfirst_fixup\@: + /* unset_bytes already in a2 */ jr ra - nop -.Lfwd_fixup: +.Lfwd_fixup\@: + /* + * unset_bytes = partial_start_addr + #bytes - fault_addr + * a2 = t1 + (a2 & 3f) - $28->task->BUADDR + */ PTR_L t0, TI_TASK($28) andi a2, 0x3f LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 + LONG_SUBU a2, t0 jr ra - LONG_SUBU a2, t0 -.Lpartial_fixup: +.Lpartial_fixup\@: + /* + * unset_bytes = partial_end_addr + #bytes - fault_addr + * a2 = a0 + (a2 & STORMASK) - $28->task->BUADDR + */ PTR_L t0, TI_TASK($28) andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) - LONG_ADDU a2, t1 + LONG_ADDU a2, a0 + LONG_SUBU a2, t0 jr ra - LONG_SUBU a2, t0 -.Llast_fixup: +.Llast_fixup\@: + /* unset_bytes already in a2 */ jr ra - andi v1, a2, STORMASK + +.Lsmall_fixup\@: + /* + * unset_bytes = end_addr - current_addr + 1 + * a2 = t1 - a0 + 1 + */ + PTR_SUBU a2, t1, a0 + PTR_ADDIU a2, 1 + jr ra + + .endm + +/* + * memset(void *s, int c, size_t n) + * + * a0: start of area to clear + * a1: char to fill with + * a2: size of area to clear + */ + +LEAF(memset) +EXPORT_SYMBOL(memset) + move v0, a0 /* result */ + beqz a1, 1f + + andi a1, 0xff /* spread fillword */ + LONG_SLL t1, a1, 8 + or a1, t1 + LONG_SLL t1, a1, 16 +#if LONGSIZE == 8 + or a1, t1 + LONG_SLL t1, a1, 32 +#endif + or a1, t1 +1: +#ifndef CONFIG_EVA +FEXPORT(__bzero) +EXPORT_SYMBOL(__bzero) +#endif + __BUILD_BZERO LEGACY_MODE + +#ifdef CONFIG_EVA +LEAF(__bzero) +EXPORT_SYMBOL(__bzero) + __BUILD_BZERO EVA_MODE +END(__bzero) +#endif diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index 6807f7172eaf..a9b72eacfc0b 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -15,7 +15,7 @@ #include 
<linux/export.h> #include <linux/stringify.h> -#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) +#if !defined(CONFIG_CPU_HAS_DIEI) /* * For cli() we have to insert nops to make sure that the new value @@ -37,68 +37,48 @@ */ notrace void arch_local_irq_disable(void) { - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 $1, $2, 1 \n" - " ori $1, 0x400 \n" - " .set noreorder \n" - " mtc0 $1, $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else " mfc0 $1,$12 \n" " ori $1,0x1f \n" " xori $1,0x1f \n" " .set noreorder \n" " mtc0 $1,$12 \n" -#endif " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" : /* no outputs */ : /* no inputs */ : "memory"); - preempt_enable(); + preempt_enable_notrace(); } EXPORT_SYMBOL(arch_local_irq_disable); - notrace unsigned long arch_local_irq_save(void) { unsigned long flags; - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" " .set reorder \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 %[flags], $2, 1 \n" - " ori $1, %[flags], 0x400 \n" - " .set noreorder \n" - " mtc0 $1, $2, 1 \n" - " andi %[flags], %[flags], 0x400 \n" -#elif defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else " mfc0 %[flags], $12 \n" " ori $1, %[flags], 0x1f \n" " xori $1, 0x1f \n" " .set noreorder \n" " mtc0 $1, $12 \n" -#endif " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" : [flags] "=r" (flags) : /* no inputs */ : "memory"); - preempt_enable(); + preempt_enable_notrace(); return flags; } @@ -108,88 +88,26 @@ notrace void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of branch and call overhead on each - * local_irq_restore() - */ - if (unlikely(!(flags & 0x0400))) - smtc_ipi_replay(); -#endif - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 $1, $2, 1 \n" - " andi %[flags], 0x400 \n" - " ori $1, 0x400 \n" - " xori $1, 0x400 \n" - " or %[flags], $1 \n" - " mtc0 %[flags], $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) - /* see irqflags.h for inline function */ -#elif defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else " mfc0 $1, $12 \n" " andi %[flags], 1 \n" " ori $1, 0x1f \n" " xori $1, 0x1f \n" " or %[flags], $1 \n" " mtc0 %[flags], $12 \n" -#endif " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" : [flags] "=r" (__tmp1) : "0" (flags) : "memory"); - preempt_enable(); + preempt_enable_notrace(); } EXPORT_SYMBOL(arch_local_irq_restore); - -notrace void __arch_local_irq_restore(unsigned long flags) -{ - unsigned long __tmp1; - - preempt_disable(); - - __asm__ __volatile__( - " .set push \n" - " .set noreorder \n" - " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 $1, $2, 1 \n" - " andi %[flags], 0x400 \n" - " ori $1, 0x400 \n" - " xori $1, 0x400 \n" - " or %[flags], $1 \n" - " mtc0 %[flags], $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) - /* see irqflags.h for inline function */ -#elif defined(CONFIG_CPU_MIPSR2) - /* see irqflags.h for inline function */ -#else - " mfc0 $1, $12 \n" - " andi %[flags], 1 \n" - " ori $1, 0x1f \n" - " xori $1, 0x1f \n" - " or %[flags], $1 \n" - " mtc0 %[flags], $12 \n" -#endif - " " 
__stringify(__irq_disable_hazard) " \n" - " .set pop \n" - : [flags] "=r" (__tmp1) - : "0" (flags) - : "memory"); - - preempt_enable(); -} -EXPORT_SYMBOL(__arch_local_irq_restore); - -#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ +#endif /* !CONFIG_CPU_HAS_DIEI */ diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c new file mode 100644 index 000000000000..4c2483f410c2 --- /dev/null +++ b/arch/mips/lib/multi3.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> + +#include "libgcc.h" + +/* + * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for + * that specific case only we implement that intrinsic here. + * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 + */ +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8) + +/* multiply 64-bit values, low 64-bits returned */ +static inline long long notrace dmulu(long long a, long long b) +{ + long long res; + + asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); + return res; +} + +/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */ +static inline long long notrace dmuhu(long long a, long long b) +{ + long long res; + + asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); + return res; +} + +/* multiply 128-bit values, low 128-bits returned */ +ti_type notrace __multi3(ti_type a, ti_type b) +{ + TWunion res, aa, bb; + + aa.ti = a; + bb.ti = b; + + /* + * a * b = (a.lo * b.lo) + * + 2^64 * (a.hi * b.lo + a.lo * b.hi) + * [+ 2^128 * (a.hi * b.hi)] + */ + res.s.low = dmulu(aa.s.low, bb.s.low); + res.s.high = dmuhu(aa.s.low, bb.s.low); + res.s.high += dmulu(aa.s.high, bb.s.low); + res.s.high += dmulu(aa.s.low, bb.s.high); + + return res.ti; +} +EXPORT_SYMBOL(__multi3); + +#endif /* 64BIT && CPU_MIPSR6 && GCC7 */ diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 91615c2ef0cf..fcf594af0002 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Dump R3000 TLB for debugging purposes. * @@ -9,19 +10,25 @@ #include <linux/mm.h> #include <asm/mipsregs.h> +#include <asm/mmu_context.h> #include <asm/page.h> -#include <asm/pgtable.h> #include <asm/tlbdebug.h> -extern int r3k_have_wired_reg; /* defined in tlb-r3k.c */ +void dump_tlb_regs(void) +{ + pr_info("Index : %0x\n", read_c0_index()); + pr_info("EntryHi : %0lx\n", read_c0_entryhi()); + pr_info("EntryLo : %0lx\n", read_c0_entrylo0()); +} static void dump_tlb(int first, int last) { int i; unsigned int asid; - unsigned long entryhi, entrylo0; + unsigned long entryhi, entrylo0, asid_mask; - asid = read_c0_entryhi() & 0xfc0; + asid_mask = cpu_asid_mask(¤t_cpu_data); + asid = read_c0_entryhi() & asid_mask; for (i = first; i <= last; i++) { write_c0_index(i<<8); @@ -34,22 +41,23 @@ static void dump_tlb(int first, int last) entrylo0 = read_c0_entrylo0(); /* Unused entries have a virtual address of KSEG0. */ - if ((entryhi & 0xffffe000) != 0x80000000 - && (entryhi & 0xfc0) == asid) { + if ((entryhi & PAGE_MASK) != KSEG0 && + (entrylo0 & R3K_ENTRYLO_G || + (entryhi & asid_mask) == asid)) { /* * Only print entries in use */ printk("Index: %2d ", i); - printk("va=%08lx asid=%08lx" - " [pa=%06lx n=%d d=%d v=%d g=%d]", - (entryhi & 0xffffe000), - entryhi & 0xfc0, - entrylo0 & PAGE_MASK, - (entrylo0 & (1 << 11)) ? 1 : 0, - (entrylo0 & (1 << 10)) ? 1 : 0, - (entrylo0 & (1 << 9)) ? 1 : 0, - (entrylo0 & (1 << 8)) ? 
1 : 0); + pr_cont("va=%08lx asid=%08lx" + " [pa=%06lx n=%d d=%d v=%d g=%d]", + entryhi & PAGE_MASK, + entryhi & asid_mask, + entrylo0 & PAGE_MASK, + (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0, + (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0, + (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0, + (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0); } } printk("\n"); diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S deleted file mode 100644 index e362dcdc69d1..000000000000 --- a/arch/mips/lib/strlen_user.S +++ /dev/null @@ -1,40 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle - * Copyright (C) 1999 Silicon Graphics, Inc. - * Copyright (C) 2011 MIPS Technologies, Inc. - */ -#include <asm/asm.h> -#include <asm/asm-offsets.h> -#include <asm/regdef.h> - -#define EX(insn,reg,addr,handler) \ -9: insn reg, addr; \ - .section __ex_table,"a"; \ - PTR 9b, handler; \ - .previous - -/* - * Return the size of a string (including the ending 0) - * - * Return 0 for error - */ -LEAF(__strlen_user_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? - and v0, a0 - bnez v0, .Lfault - -FEXPORT(__strlen_user_nocheck_asm) - move v0, a0 -1: EX(lbu, v1, (v0), .Lfault) - PTR_ADDIU v0, 1 - bnez v1, 1b - PTR_SUBU v0, a0 - jr ra - END(__strlen_user_asm) - -.Lfault: move v0, zero - jr ra diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index 92870b6b53ea..94f4203563c1 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -7,6 +7,7 @@ * Copyright (C) 2011 MIPS Technologies, Inc. */ #include <linux/errno.h> +#include <linux/export.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> @@ -14,7 +15,7 @@ #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ - PTR 9b, handler; \ + PTR_WD 9b, handler; \ .previous /* @@ -29,33 +30,36 @@ */ LEAF(__strncpy_from_user_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? - and v0, a1 - bnez v0, .Lfault - -FEXPORT(__strncpy_from_user_nocheck_asm) - .set noreorder move t0, zero move v1, a1 +#ifdef CONFIG_EVA + .set push + .set eva +1: EX(lbue, v0, (v1), .Lfault) + .set pop +#else 1: EX(lbu, v0, (v1), .Lfault) +#endif PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) + sb v0, (a0) beqz v0, 2f - sb v0, (a0) PTR_ADDIU t0, 1 + PTR_ADDIU a0, 1 bne t0, a2, 1b - PTR_ADDIU a0, 1 2: PTR_ADDU v0, a1, t0 xor v0, a1 bltz v0, .Lfault - nop + move v0, t0 jr ra # return n - move v0, t0 END(__strncpy_from_user_asm) -.Lfault: jr ra - li v0, -EFAULT +.Lfault: + li v0, -EFAULT + jr ra .section __ex_table,"a" - PTR 1b, .Lfault + PTR_WD 1b, .Lfault .previous + + EXPORT_SYMBOL(__strncpy_from_user_asm) diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index fcacea5e61f1..c192a6f6cd84 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -6,6 +6,7 @@ * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle * Copyright (c) 1999 Silicon Graphics, Inc. */ +#include <linux/export.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> @@ -13,7 +14,7 @@ #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ - PTR 9b, handler; \ + PTR_WD 9b, handler; \ .previous /* @@ -26,21 +27,38 @@ * the maximum is a tad hairier ... */ LEAF(__strnlen_user_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 
- and v0, a0
- bnez v0, .Lfault
-
-FEXPORT(__strnlen_user_nocheck_asm)
 move v0, a0
 PTR_ADDU a1, a0 # stop pointer
-1: beq v0, a1, 1f # limit reached?
+1:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+ .set noat
+ li AT, 1
+#endif
+ beq v0, a1, 1f # limit reached?
+#ifdef CONFIG_EVA
+ .set push
+ .set eva
+ EX(lbe, t0, (v0), .Lfault)
+ .set pop
+#else
 EX(lb, t0, (v0), .Lfault)
- PTR_ADDIU v0, 1
+#endif
+ .set noreorder
 bnez t0, 1b
-1: PTR_SUBU v0, a0
+1:
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ PTR_ADDIU v0, 1
+#else
+ PTR_ADDU v0, AT
+ .set at
+#endif
+ .set reorder
+ PTR_SUBU v0, a0
 jr ra
 END(__strnlen_user_asm)
.Lfault: move v0, zero
 jr ra
+
+ EXPORT_SYMBOL(__strnlen_user_asm)
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
deleted file mode 100644
index bb4cb2f828ea..000000000000
--- a/arch/mips/lib/ucmpdi2.c
+++ /dev/null
@@ -1,21 +0,0 @@
-#include <linux/module.h>
-
-#include "libgcc.h"
-
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
-{
- const DWunion au = {.ll = a};
- const DWunion bu = {.ll = b};
-
- if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
- return 0;
- else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
- return 2;
- if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
- return 0;
- else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
- return 2;
- return 1;
-}
-
-EXPORT_SYMBOL(__ucmpdi2);
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 65e3dfc4e585..f80a67c092b6 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -8,7 +8,6 @@
 * Author: Maciej W. Rozycki <macro@mips.com>
 */
-#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/bug.h>
@@ -36,12 +35,14 @@
 * values, so we can avoid sharing the same stack area between a cached
 * and the uncached mode.
 */
-unsigned long __cpuinit run_uncached(void *func)
+unsigned long run_uncached(void *func)
{
- register long sp __asm__("$sp");
 register long ret __asm__("$2");
 long lfunc = (long)func, ufunc;
 long usp;
+ long sp;
+
+ __asm__("move %0, $sp" : "=r" (sp));
 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
 usp = CKSEG1ADDR(sp);
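Editor's note on the multi3.c addition above: __multi3 there is built from the MIPS r6 dmulu/dmuhu instructions, and the in-file comment spells out the schoolbook decomposition a * b = (a.lo * b.lo) + 2^64 * (a.hi * b.lo + a.lo * b.hi) [+ 2^128 * (a.hi * b.hi)]. The following is a minimal, portable C sketch of that same decomposition, not kernel code; mullo64() and mulhi64() are hypothetical helpers standing in for dmulu and dmuhu. The a.hi * b.hi term and the high halves of the two cross products are dropped because they only contribute at bit 128 and above, which a low-128-bit result discards.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Low 64 bits of a 64x64 multiply, as dmulu returns them. */
static uint64_t mullo64(uint64_t a, uint64_t b)
{
	return a * b;
}

/*
 * High 64 bits of an unsigned 64x64 multiply, as dmuhu returns them,
 * computed here from 32-bit partial products with carry propagation.
 */
static uint64_t mulhi64(uint64_t a, uint64_t b)
{
	uint64_t al = (uint32_t)a, ah = a >> 32;
	uint64_t bl = (uint32_t)b, bh = b >> 32;
	uint64_t mid = ah * bl + ((al * bl) >> 32);
	uint64_t mid2 = al * bh + (uint32_t)mid;

	return ah * bh + (mid >> 32) + (mid2 >> 32);
}

/* Low 128 bits of a 128x128 multiply, mirroring __multi3 above. */
static void multi3(uint64_t ahi, uint64_t alo, uint64_t bhi, uint64_t blo,
		   uint64_t *rhi, uint64_t *rlo)
{
	*rlo = mullo64(alo, blo);
	*rhi = mulhi64(alo, blo)	/* carry out of the low product */
	     + mullo64(ahi, blo)	/* cross terms: only their low  */
	     + mullo64(alo, bhi);	/* halves land below bit 128    */
}

int main(void)
{
	uint64_t hi, lo;

	/* (2^64 - 1)^2 = 0xfffffffffffffffe_0000000000000001 */
	multi3(0, ~0ULL, 0, ~0ULL, &hi, &lo);
	printf("%016" PRIx64 "%016" PRIx64 "\n", hi, lo);
	return 0;
}

Built with any C99 compiler this prints fffffffffffffffe0000000000000001, matching what the dmulu/dmuhu sequence produces on mips64r6.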

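A second editor's note, on the mips-atomic.c hunk above: the "ori $1, 0x1f; xori $1, 0x1f" pair that recurs in each rewritten routine is the standard MIPS idiom for clearing a small bit field in place. The OR forces the low five Status bits to 1, and the XOR with the same mask flips exactly those bits back to 0, so no scratch register is needed to hold an inverted mask (andi zero-extends its 16-bit immediate, so it cannot express ~0x1f and would also clear Status bits 16 and up). A tiny C model of the net effect, illustration only:

#include <assert.h>
#include <stdint.h>

/*
 * Net effect of "ori $1, 0x1f; xori $1, 0x1f" on a Status value:
 * equivalent to status & ~0x1f, without materializing ~0x1f.
 */
static uint32_t clear_status_low5(uint32_t status)
{
	status |= 0x1f;		/* ori  $1, 0x1f: set bits 0..4     */
	status ^= 0x1f;		/* xori $1, 0x1f: flip them back to 0 */
	return status;
}

int main(void)
{
	assert(clear_status_low5(0xdeadbeef) == (0xdeadbeef & ~0x1fu));
	assert(clear_status_low5(0x00000001) == 0);	/* IE bit cleared */
	return 0;
}

arch_local_irq_restore() then ORs the saved IE bit back into the cleared value before writing it to CP0 Status with mtc0.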