Diffstat (limited to 'arch/sparc/lib')
 -rw-r--r--  arch/sparc/lib/Makefile               |    5
 -rw-r--r--  arch/sparc/lib/NGmemcpy.S             |    3
 -rw-r--r--  arch/sparc/lib/U1memcpy.S             |    2
 -rw-r--r--  arch/sparc/lib/VISsave.S              |    2
 -rw-r--r--  arch/sparc/lib/ashldi3.S              |    2
 -rw-r--r--  arch/sparc/lib/ashrdi3.S              |    2
 -rw-r--r--  arch/sparc/lib/atomic32.c             |   36
 -rw-r--r--  arch/sparc/lib/atomic_64.S            |   44
 -rw-r--r--  arch/sparc/lib/bitops.S               |    2
 -rw-r--r--  arch/sparc/lib/blockops.S             |    2
 -rw-r--r--  arch/sparc/lib/bzero.S                |    2
 -rw-r--r--  arch/sparc/lib/checksum_32.S          |  260
 -rw-r--r--  arch/sparc/lib/checksum_64.S          |    2
 -rw-r--r--  arch/sparc/lib/clear_page.S           |    4
 -rw-r--r--  arch/sparc/lib/cmpdi2.c               |   28
 -rw-r--r--  arch/sparc/lib/copy_in_user.S         |    2
 -rw-r--r--  arch/sparc/lib/copy_page.S            |    4
 -rw-r--r--  arch/sparc/lib/copy_user.S            |  317
 -rw-r--r--  arch/sparc/lib/csum_copy.S            |    5
 -rw-r--r--  arch/sparc/lib/csum_copy_from_user.S  |    4
 -rw-r--r--  arch/sparc/lib/csum_copy_to_user.S    |    4
 -rw-r--r--  arch/sparc/lib/divdi3.S               |    2
 -rw-r--r--  arch/sparc/lib/ffs.S                  |    2
 -rw-r--r--  arch/sparc/lib/fls.S                  |    2
 -rw-r--r--  arch/sparc/lib/fls64.S                |    2
 -rw-r--r--  arch/sparc/lib/hweight.S              |    2
 -rw-r--r--  arch/sparc/lib/iomap.c                |    2
 -rw-r--r--  arch/sparc/lib/ipcsum.S               |    2
 -rw-r--r--  arch/sparc/lib/locks.S                |    2
 -rw-r--r--  arch/sparc/lib/lshrdi3.S              |    2
 -rw-r--r--  arch/sparc/lib/mcount.S               |    2
 -rw-r--r--  arch/sparc/lib/memcmp.S               |    2
 -rw-r--r--  arch/sparc/lib/memcpy.S               |    3
 -rw-r--r--  arch/sparc/lib/memmove.S              |    2
 -rw-r--r--  arch/sparc/lib/memscan_32.S           |    2
 -rw-r--r--  arch/sparc/lib/memscan_64.S           |    2
 -rw-r--r--  arch/sparc/lib/memset.S               |   88
 -rw-r--r--  arch/sparc/lib/muldi3.S               |    2
 -rw-r--r--  arch/sparc/lib/multi3.S               |    2
 -rw-r--r--  arch/sparc/lib/strlen.S               |    2
 -rw-r--r--  arch/sparc/lib/strncmp_32.S           |    2
 -rw-r--r--  arch/sparc/lib/strncmp_64.S           |    2
 -rw-r--r--  arch/sparc/lib/ucmpdi2.c              |   20
 -rw-r--r--  arch/sparc/lib/xor.S                  |    2
 44 files changed, 295 insertions(+), 588 deletions(-)
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 063556fe2cb1..ee5091dd67ed 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -3,7 +3,6 @@
#
asflags-y := -ansi -DST_DIV0=0x02
-ccflags-y := -Werror
lib-$(CONFIG_SPARC32) += ashrdi3.o
lib-$(CONFIG_SPARC32) += memcpy.o memset.o
@@ -15,7 +14,7 @@ lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-$(CONFIG_SPARC64) += atomic_64.o
lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
-lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC32) += muldi3.o bitext.o
lib-$(CONFIG_SPARC64) += multi3.o
lib-$(CONFIG_SPARC64) += fls.o
lib-$(CONFIG_SPARC64) += fls64.o
@@ -52,5 +51,5 @@ lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
-obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
+obj-$(CONFIG_SPARC32) += atomic32.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index 8e4d22a6ba0b..ee51c1230689 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -10,8 +10,7 @@
#include <asm/thread_info.h>
#define GLOBAL_SPARE %g7
#define RESTORE_ASI(TMP) \
- ldub [%g6 + TI_CURRENT_DS], TMP; \
- wr TMP, 0x0, %asi;
+ wr %g0, ASI_AIUS, %asi
#else
#define GLOBAL_SPARE %g5
#define RESTORE_ASI(TMP) \
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index a6f4ee391897..635398ec7540 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -6,10 +6,10 @@
*/
#ifdef __KERNEL__
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
-#include <asm/export.h>
#define GLOBAL_SPARE g7
#else
#define GLOBAL_SPARE g5
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index 9c8eb2017d5b..31a0c336c185 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -7,6 +7,7 @@
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
@@ -14,7 +15,6 @@
#include <asm/ptrace.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
-#include <asm/export.h>
/* On entry: %o5=current FPRS value, %g7 is callers address */
/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S
index 2d72de88af90..2a9e7c4fb260 100644
--- a/arch/sparc/lib/ashldi3.S
+++ b/arch/sparc/lib/ashldi3.S
@@ -6,8 +6,8 @@
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(__ashldi3)
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
index 05dfda9f5005..8fd0b311722f 100644
--- a/arch/sparc/lib/ashrdi3.S
+++ b/arch/sparc/lib/ashrdi3.S
@@ -6,8 +6,8 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(__ashrdi3)
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 281fa634bb1a..cf80d1ae352b 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
#define ATOMIC_FETCH_OP(op, c_op) \
-int atomic_fetch_##op(int i, atomic_t *v) \
+int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -41,10 +41,10 @@ int atomic_fetch_##op(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_fetch_##op);
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
#define ATOMIC_OP_RETURN(op, c_op) \
-int atomic_##op##_return(int i, atomic_t *v) \
+int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -55,7 +55,7 @@ int atomic_##op##_return(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(arch_atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=)
@@ -67,7 +67,7 @@ ATOMIC_FETCH_OP(xor, ^=)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-int atomic_xchg(atomic_t *v, int new)
+int arch_atomic_xchg(atomic_t *v, int new)
{
int ret;
unsigned long flags;
@@ -78,9 +78,9 @@ int atomic_xchg(atomic_t *v, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_xchg);
+EXPORT_SYMBOL(arch_atomic_xchg);
-int atomic_cmpxchg(atomic_t *v, int old, int new)
+int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -93,9 +93,9 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_cmpxchg);
+EXPORT_SYMBOL(arch_atomic_cmpxchg);
-int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,10 +107,10 @@ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_fetch_add_unless);
+EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
/* Atomic operations are already serializing */
-void atomic_set(atomic_t *v, int i)
+void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
@@ -118,9 +118,9 @@ void atomic_set(atomic_t *v, int i)
v->counter = i;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
-EXPORT_SYMBOL(atomic_set);
+EXPORT_SYMBOL(arch_atomic_set);
-unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;
@@ -131,9 +131,9 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
return old & mask;
}
-EXPORT_SYMBOL(___set_bit);
+EXPORT_SYMBOL(sp32___set_bit);
-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;
@@ -144,9 +144,9 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
return old & mask;
}
-EXPORT_SYMBOL(___clear_bit);
+EXPORT_SYMBOL(sp32___clear_bit);
-unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;
@@ -157,7 +157,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
return old & mask;
}
-EXPORT_SYMBOL(___change_bit);
+EXPORT_SYMBOL(sp32___change_bit);
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
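
The atomic_* to arch_atomic_* renames in this file follow the kernel-wide convention where the raw architecture implementation carries an arch_ prefix and a generic layer adds KASAN/KCSAN instrumentation on top of it (the sp32___* bit-op renames likewise keep these sparc32 helpers out of the way of generic names). A minimal sketch of the wrapper shape, assuming the generated wrappers under include/linux/atomic/ — names here are approximate, not copied from the tree:

/* Approximate shape of the generic instrumented wrapper that sits on
 * top of the arch_ implementation exported by this file. */
static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_fetch_add(i, v);
}
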
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 456b65a30ecf..4f8cab2fb9cd 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -4,10 +4,10 @@
* Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
-#include <asm/export.h>
.text
@@ -19,7 +19,7 @@
*/
#define ATOMIC_OP(op) \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -30,11 +30,11 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op); \
-EXPORT_SYMBOL(atomic_##op);
+ENDPROC(arch_atomic_##op); \
+EXPORT_SYMBOL(arch_atomic_##op);
#define ATOMIC_OP_RETURN(op) \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -45,11 +45,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic_##op##_return);
+ENDPROC(arch_atomic_##op##_return); \
+EXPORT_SYMBOL(arch_atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
-ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -60,8 +60,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic_fetch_##op);
+ENDPROC(arch_atomic_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
ATOMIC_OP(add)
ATOMIC_OP_RETURN(add)
@@ -85,7 +85,7 @@ ATOMIC_FETCH_OP(xor)
#undef ATOMIC_OP
#define ATOMIC64_OP(op) \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -96,11 +96,11 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op); \
-EXPORT_SYMBOL(atomic64_##op);
+ENDPROC(arch_atomic64_##op); \
+EXPORT_SYMBOL(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -111,11 +111,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
+ENDPROC(arch_atomic64_##op##_return); \
+EXPORT_SYMBOL(arch_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
-ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -126,8 +126,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+ENDPROC(arch_atomic64_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic64_fetch_##op);
ATOMIC64_OP(add)
ATOMIC64_OP_RETURN(add)
@@ -150,7 +150,7 @@ ATOMIC64_FETCH_OP(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o0], %g1
brlez,pn %g1, 3f
@@ -162,5 +162,5 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
3: retl
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_dec_if_positive)
-EXPORT_SYMBOL(atomic64_dec_if_positive)
+ENDPROC(arch_atomic64_dec_if_positive)
+EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
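
The same renames land in the sparc64 assembly, where each macro expands to a load / modify / compare-and-swap retry loop with exponential backoff (BACKOFF_SETUP/BACKOFF_SPIN) on contention. A C model of ATOMIC_OP(add) — a sketch of the control flow, not a literal translation of the assembly:

/* C model of ATOMIC_OP(add): lduw, add, cas, retry on failure.
 * backoff_spin() is a hypothetical stand-in for BACKOFF_SPIN. */
static void arch_atomic_add_model(int i, atomic_t *v)
{
	for (;;) {
		int old = READ_ONCE(v->counter);		/* lduw */
		if (cmpxchg(&v->counter, old, old + i) == old)	/* cas  */
			return;
		backoff_spin();
	}
}
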
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
index 9d647f977618..9c91cbb310e7 100644
--- a/arch/sparc/lib/bitops.S
+++ b/arch/sparc/lib/bitops.S
@@ -4,10 +4,10 @@
* Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
-#include <asm/export.h>
.text
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
index 76ddd1ff6833..5b92959a4d48 100644
--- a/arch/sparc/lib/blockops.S
+++ b/arch/sparc/lib/blockops.S
@@ -5,9 +5,9 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/page.h>
-#include <asm/export.h>
/* Zero out 64 bytes of memory at (buf + offset).
* Assumes %g1 contains zero.
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index 87fec4cbe10c..2bfa44a6b25e 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -5,8 +5,8 @@
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 6a5469c97246..66eda40fce36 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -14,8 +14,8 @@
* BSD4.4 portable checksum routine
*/
+#include <linux/export.h>
#include <asm/errno.h>
-#include <asm/export.h>
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
ldd [buf + offset + 0x00], t0; \
@@ -144,44 +144,14 @@ cpte: bne csum_partial_end_cruft ! yep, handle it
cpout: retl ! get outta here
mov %o2, %o0 ! return computed csum
- .globl __csum_partial_copy_start, __csum_partial_copy_end
-__csum_partial_copy_start:
-
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
-#define EX(x,y,a,b) \
-98: x,y; \
- .section .fixup,ALLOC,EXECINSTR; \
- .align 4; \
-99: ba 30f; \
- a, b, %o3; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4
-
-#define EX2(x,y) \
-98: x,y; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 30f; \
- .text; \
- .align 4
-
-#define EX3(x,y) \
+#define EX(x,y) \
98: x,y; \
.section __ex_table,ALLOC; \
.align 4; \
- .word 98b, 96f; \
- .text; \
- .align 4
-
-#define EXT(start,end,handler) \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word start, 0, end, handler; \
+ .word 98b, cc_fault; \
.text; \
.align 4
@@ -192,20 +162,20 @@ __csum_partial_copy_start:
* please check the fixup code below as well.
*/
#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [src + off + 0x00], t0; \
- ldd [src + off + 0x08], t2; \
+ EX(ldd [src + off + 0x00], t0); \
+ EX(ldd [src + off + 0x08], t2); \
addxcc t0, sum, sum; \
- ldd [src + off + 0x10], t4; \
+ EX(ldd [src + off + 0x10], t4); \
addxcc t1, sum, sum; \
- ldd [src + off + 0x18], t6; \
+ EX(ldd [src + off + 0x18], t6); \
addxcc t2, sum, sum; \
- std t0, [dst + off + 0x00]; \
+ EX(std t0, [dst + off + 0x00]); \
addxcc t3, sum, sum; \
- std t2, [dst + off + 0x08]; \
+ EX(std t2, [dst + off + 0x08]); \
addxcc t4, sum, sum; \
- std t4, [dst + off + 0x10]; \
+ EX(std t4, [dst + off + 0x10]); \
addxcc t5, sum, sum; \
- std t6, [dst + off + 0x18]; \
+ EX(std t6, [dst + off + 0x18]); \
addxcc t6, sum, sum; \
addxcc t7, sum, sum;
@@ -214,59 +184,59 @@ __csum_partial_copy_start:
* Viking MXCC into streaming mode. Ho hum...
*/
#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [src + off + 0x00], t0; \
- ldd [src + off + 0x08], t2; \
- ldd [src + off + 0x10], t4; \
- ldd [src + off + 0x18], t6; \
- st t0, [dst + off + 0x00]; \
+ EX(ldd [src + off + 0x00], t0); \
+ EX(ldd [src + off + 0x08], t2); \
+ EX(ldd [src + off + 0x10], t4); \
+ EX(ldd [src + off + 0x18], t6); \
+ EX(st t0, [dst + off + 0x00]); \
addxcc t0, sum, sum; \
- st t1, [dst + off + 0x04]; \
+ EX(st t1, [dst + off + 0x04]); \
addxcc t1, sum, sum; \
- st t2, [dst + off + 0x08]; \
+ EX(st t2, [dst + off + 0x08]); \
addxcc t2, sum, sum; \
- st t3, [dst + off + 0x0c]; \
+ EX(st t3, [dst + off + 0x0c]); \
addxcc t3, sum, sum; \
- st t4, [dst + off + 0x10]; \
+ EX(st t4, [dst + off + 0x10]); \
addxcc t4, sum, sum; \
- st t5, [dst + off + 0x14]; \
+ EX(st t5, [dst + off + 0x14]); \
addxcc t5, sum, sum; \
- st t6, [dst + off + 0x18]; \
+ EX(st t6, [dst + off + 0x18]); \
addxcc t6, sum, sum; \
- st t7, [dst + off + 0x1c]; \
+ EX(st t7, [dst + off + 0x1c]); \
addxcc t7, sum, sum;
/* Yuck, 6 superscalar cycles... */
#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
- ldd [src - off - 0x08], t0; \
- ldd [src - off - 0x00], t2; \
+ EX(ldd [src - off - 0x08], t0); \
+ EX(ldd [src - off - 0x00], t2); \
addxcc t0, sum, sum; \
- st t0, [dst - off - 0x08]; \
+ EX(st t0, [dst - off - 0x08]); \
addxcc t1, sum, sum; \
- st t1, [dst - off - 0x04]; \
+ EX(st t1, [dst - off - 0x04]); \
addxcc t2, sum, sum; \
- st t2, [dst - off - 0x00]; \
+ EX(st t2, [dst - off - 0x00]); \
addxcc t3, sum, sum; \
- st t3, [dst - off + 0x04];
+ EX(st t3, [dst - off + 0x04]);
/* Handle the end cruft code out of band for better cache patterns. */
cc_end_cruft:
be 1f
andcc %o3, 4, %g0
- EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf)
+ EX(ldd [%o0 + 0x00], %g2)
add %o1, 8, %o1
addcc %g2, %g7, %g7
add %o0, 8, %o0
addxcc %g3, %g7, %g7
- EX2(st %g2, [%o1 - 0x08])
+ EX(st %g2, [%o1 - 0x08])
addx %g0, %g7, %g7
andcc %o3, 4, %g0
- EX2(st %g3, [%o1 - 0x04])
+ EX(st %g3, [%o1 - 0x04])
1: be 1f
andcc %o3, 3, %o3
- EX(ld [%o0 + 0x00], %g2, add %o3, 4)
+ EX(ld [%o0 + 0x00], %g2)
add %o1, 4, %o1
addcc %g2, %g7, %g7
- EX2(st %g2, [%o1 - 0x04])
+ EX(st %g2, [%o1 - 0x04])
addx %g0, %g7, %g7
andcc %o3, 3, %g0
add %o0, 4, %o0
@@ -276,14 +246,14 @@ cc_end_cruft:
subcc %o3, 2, %o3
b 4f
or %g0, %g0, %o4
-2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2)
+2: EX(lduh [%o0 + 0x00], %o4)
add %o0, 2, %o0
- EX2(sth %o4, [%o1 + 0x00])
+ EX(sth %o4, [%o1 + 0x00])
be 6f
add %o1, 2, %o1
sll %o4, 16, %o4
-4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1)
- EX2(stb %o5, [%o1 + 0x00])
+4: EX(ldub [%o0 + 0x00], %o5)
+ EX(stb %o5, [%o1 + 0x00])
sll %o5, 8, %o5
or %o5, %o4, %o4
6: addcc %o4, %g7, %g7
@@ -306,9 +276,9 @@ cc_dword_align:
andcc %o0, 0x2, %g0
be 1f
andcc %o0, 0x4, %g0
- EX(lduh [%o0 + 0x00], %g4, add %g1, 0)
+ EX(lduh [%o0 + 0x00], %g4)
sub %g1, 2, %g1
- EX2(sth %g4, [%o1 + 0x00])
+ EX(sth %g4, [%o1 + 0x00])
add %o0, 2, %o0
sll %g4, 16, %g4
addcc %g4, %g7, %g7
@@ -322,9 +292,9 @@ cc_dword_align:
or %g3, %g7, %g7
1: be 3f
andcc %g1, 0xffffff80, %g0
- EX(ld [%o0 + 0x00], %g4, add %g1, 0)
+ EX(ld [%o0 + 0x00], %g4)
sub %g1, 4, %g1
- EX2(st %g4, [%o1 + 0x00])
+ EX(st %g4, [%o1 + 0x00])
add %o0, 4, %o0
addcc %g4, %g7, %g7
add %o1, 4, %o1
@@ -354,7 +324,6 @@ __csum_partial_copy_sparc_generic:
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10: EXT(5b, 10b, 20f) ! note for exception handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -379,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12: EXT(cctbl, 12b, 22f) ! note for exception table handling
- addx %g0, %g7, %g7
+12: addx %g0, %g7, %g7
andcc %o3, 0xf, %g0 ! check for low bits set
ccte: bne cc_end_cruft ! something left, handle it out of band
andcc %o3, 8, %g0 ! begin checks for that code
@@ -390,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11: EXT(ccdbl, 11b, 21f) ! note for exception table handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -407,9 +374,9 @@ ccslow: cmp %g1, 0
be,a 1f
srl %g1, 1, %g4
sub %g1, 1, %g1
- EX(ldub [%o0], %g5, add %g1, 1)
+ EX(ldub [%o0], %g5)
add %o0, 1, %o0
- EX2(stb %g5, [%o1])
+ EX(stb %g5, [%o1])
srl %g1, 1, %g4
add %o1, 1, %o1
1: cmp %g4, 0
@@ -418,34 +385,34 @@ ccslow: cmp %g1, 0
andcc %o0, 2, %g0
be,a 1f
srl %g4, 1, %g4
- EX(lduh [%o0], %o4, add %g1, 0)
+ EX(lduh [%o0], %o4)
sub %g1, 2, %g1
srl %o4, 8, %g2
sub %g4, 1, %g4
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 1])
+ EX(stb %o4, [%o1 + 1])
add %o0, 2, %o0
srl %g4, 1, %g4
add %o1, 2, %o1
1: cmp %g4, 0
be,a 2f
andcc %g1, 2, %g0
- EX3(ld [%o0], %o4)
+ EX(ld [%o0], %o4)
5: srl %o4, 24, %g2
srl %o4, 16, %g3
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
srl %o4, 8, %g2
- EX2(stb %g3, [%o1 + 1])
+ EX(stb %g3, [%o1 + 1])
add %o0, 4, %o0
- EX2(stb %g2, [%o1 + 2])
+ EX(stb %g2, [%o1 + 2])
addcc %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 3])
+ EX(stb %o4, [%o1 + 3])
addx %g5, %g0, %g5 ! I am now to lazy to optimize this (question it
add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl
subcc %g4, 1, %g4 ! tricks
bne,a 5b
- EX3(ld [%o0], %o4)
+ EX(ld [%o0], %o4)
sll %g5, 16, %g2
srl %g5, 16, %g5
srl %g2, 16, %g2
@@ -453,19 +420,19 @@ ccslow: cmp %g1, 0
add %g2, %g5, %g5
2: be,a 3f
andcc %g1, 1, %g0
- EX(lduh [%o0], %o4, and %g1, 3)
+ EX(lduh [%o0], %o4)
andcc %g1, 1, %g0
srl %o4, 8, %g2
add %o0, 2, %o0
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %g5, %o4, %g5
- EX2(stb %o4, [%o1 + 1])
+ EX(stb %o4, [%o1 + 1])
add %o1, 2, %o1
3: be,a 1f
sll %g5, 16, %o4
- EX(ldub [%o0], %g2, add %g0, 1)
+ EX(ldub [%o0], %g2)
sll %g2, 8, %o4
- EX2(stb %g2, [%o1])
+ EX(stb %g2, [%o1])
add %g5, %o4, %g5
sll %g5, 16, %o4
1: addcc %o4, %g5, %g5
@@ -481,113 +448,10 @@ ccslow: cmp %g1, 0
4: addcc %g7, %g5, %g7
retl
addx %g0, %g7, %o0
-__csum_partial_copy_end:
/* We do these strange calculations for the csum_*_from_user case only, ie.
* we only bother with faults on loads... */
-/* o2 = ((g2%20)&3)*8
- * o3 = g1 - (g2/20)*32 - o2 */
-20:
- cmp %g2, 20
- blu,a 1f
- and %g2, 3, %o2
- sub %g1, 32, %g1
- b 20b
- sub %g2, 20, %g2
-1:
- sll %o2, 3, %o2
- b 31f
- sub %g1, %o2, %o3
-
-/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
- * o3 = g1 - (g2/16)*32 - o2 */
-21:
- andcc %g2, 15, %o3
- srl %g2, 4, %g2
- be,a 1f
- clr %o2
- add %o3, 1, %o3
- and %o3, 14, %o3
- sll %o3, 3, %o2
-1:
- sll %g2, 5, %g2
- sub %g1, %g2, %o3
- b 31f
- sub %o3, %o2, %o3
-
-/* o0 += (g2/10)*16 - 0x70
- * 01 += (g2/10)*16 - 0x70
- * o2 = (g2 % 10) ? 8 : 0
- * o3 += 0x70 - (g2/10)*16 - o2 */
-22:
- cmp %g2, 10
- blu,a 1f
- sub %o0, 0x70, %o0
- add %o0, 16, %o0
- add %o1, 16, %o1
- sub %o3, 16, %o3
- b 22b
- sub %g2, 10, %g2
-1:
- sub %o1, 0x70, %o1
- add %o3, 0x70, %o3
- clr %o2
- tst %g2
- bne,a 1f
- mov 8, %o2
-1:
- b 31f
- sub %o3, %o2, %o3
-96:
- and %g1, 3, %g1
- sll %g4, 2, %g4
- add %g1, %g4, %o3
-30:
-/* %o1 is dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occurred */
- clr %o2
-31:
-/* %o0 is src
- * %o1 is dst
- * %o2 is # of bytes to copy from src to dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occurred */
- save %sp, -104, %sp
- mov %i5, %o0
- mov %i7, %o1
- mov %i4, %o2
- call lookup_fault
- mov %g7, %i4
- cmp %o0, 2
- bne 1f
- add %g0, -EFAULT, %i5
- tst %i2
- be 2f
- mov %i0, %o1
- mov %i1, %o0
-5:
- call memcpy
- mov %i2, %o2
- tst %o0
- bne,a 2f
- add %i3, %i2, %i3
- add %i1, %i2, %i1
-2:
- mov %i1, %o0
-6:
- call __bzero
- mov %i3, %o1
-1:
- ld [%sp + 168], %o2 ! struct_ptr of parent
- st %i5, [%o2]
- ret
- restore
-
- .section __ex_table,#alloc
- .align 4
- .word 5b,2
- .word 6b,2
+cc_fault:
+ retl
+ clr %o0
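
The net effect of the checksum_32.S rewrite: the range-based EXT() entries and the deleted fixup arithmetic (the 20:/21:/22:/96: blocks above) are gone, replaced by one exception entry per load/store, all resolving to the single cc_fault handler, which returns 0. Roughly, the sparc32 entry layout this relies on:

/* Shape of a sparc32 __ex_table entry as emitted by the new EX()
 * macro: a faulting PC maps to a fixup PC; after this patch every
 * fixup in this file is cc_fault, i.e. "return 0 to the caller". */
struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* where to resume: cc_fault */
};
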
diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S
index 9700ef1730df..32b626f3fe4d 100644
--- a/arch/sparc/lib/checksum_64.S
+++ b/arch/sparc/lib/checksum_64.S
@@ -14,7 +14,7 @@
* BSD4.4 portable checksum routine
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
csum_partial_fix_alignment:
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
index 8a6c783a6301..e63458194f5a 100644
--- a/arch/sparc/lib/clear_page.S
+++ b/arch/sparc/lib/clear_page.S
@@ -5,13 +5,13 @@
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
+#include <linux/export.h>
+#include <linux/pgtable.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
-#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
diff --git a/arch/sparc/lib/cmpdi2.c b/arch/sparc/lib/cmpdi2.c
deleted file mode 100644
index 333367fe7353..000000000000
--- a/arch/sparc/lib/cmpdi2.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-
-#include "libgcc.h"
-
-word_type __cmpdi2(long long a, long long b)
-{
- const DWunion au = {
- .ll = a
- };
- const DWunion bu = {
- .ll = b
- };
-
- if (au.s.high < bu.s.high)
- return 0;
- else if (au.s.high > bu.s.high)
- return 2;
-
- if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
- return 0;
- else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
- return 2;
-
- return 1;
-}
-
-EXPORT_SYMBOL(__cmpdi2);
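
__cmpdi2() is a libgcc helper that gcc emits for signed 64-bit comparisons on 32-bit targets; dropping the arch copy presumably defers to a shared lib/ implementation, which has to preserve the same tri-state contract:

/* libgcc contract any replacement must keep (signed 64-bit compare):
 *   __cmpdi2(a, b) == 0  if a <  b
 *   __cmpdi2(a, b) == 1  if a == b
 *   __cmpdi2(a, b) == 2  if a >  b
 * __ucmpdi2(), removed further down, is the unsigned twin. */
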
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 66e90bf528e2..e23e6a69ff92 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -4,9 +4,9 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
-#include <asm/export.h>
#define XCC xcc
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index c088e871e8e3..7a041f3ebc58 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -5,13 +5,13 @@
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
+#include <linux/export.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
-#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
index dc72f2b970b7..7bb2ef68881d 100644
--- a/arch/sparc/lib/copy_user.S
+++ b/arch/sparc/lib/copy_user.S
@@ -12,107 +12,143 @@
* Returns 0 if successful, otherwise count of bytes not copied yet
*/
+#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/thread_info.h>
-#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
+
+#define EX_ENTRY(l1, l2) \
+ .section __ex_table,ALLOC; \
+ .align 4; \
+ .word l1, l2; \
+ .text;
+
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
-99: ba fixupretl; \
- a, b, %g3; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4
+99: retl; \
+ a, b, %o0; \
+ EX_ENTRY(98b, 99b)
#define EX2(x,y,c,d,e,a,b) \
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
99: c, d, e; \
- ba fixupretl; \
- a, b, %g3; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4
+ retl; \
+ a, b, %o0; \
+ EX_ENTRY(98b, 99b)
#define EXO2(x,y) \
98: x, y; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 97f; \
- .text; \
- .align 4
+ EX_ENTRY(98b, 97f)
-#define EXT(start,end,handler) \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word start, 0, end, handler; \
- .text; \
- .align 4
+#define LD(insn, src, offset, reg, label) \
+98: insn [%src + (offset)], %reg; \
+ .section .fixup,ALLOC,EXECINSTR; \
+99: ba label; \
+ mov offset, %g5; \
+ EX_ENTRY(98b, 99b)
-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
+#define ST(insn, dst, offset, reg, label) \
+98: insn %reg, [%dst + (offset)]; \
+ .section .fixup,ALLOC,EXECINSTR; \
+99: ba label; \
+ mov offset, %g5; \
+ EX_ENTRY(98b, 99b)
/* Both these macros have to start with exactly the same insn */
+/* left: g7 + (g1 % 128) - offset */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c]; \
- st %t4, [%dst + (offset) + 0x10]; \
- st %t5, [%dst + (offset) + 0x14]; \
- st %t6, [%dst + (offset) + 0x18]; \
- st %t7, [%dst + (offset) + 0x1c];
-
+ LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+ LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+ LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+ LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+ ST(st, dst, offset + 0x00, t0, bigchunk_fault) \
+ ST(st, dst, offset + 0x04, t1, bigchunk_fault) \
+ ST(st, dst, offset + 0x08, t2, bigchunk_fault) \
+ ST(st, dst, offset + 0x0c, t3, bigchunk_fault) \
+ ST(st, dst, offset + 0x10, t4, bigchunk_fault) \
+ ST(st, dst, offset + 0x14, t5, bigchunk_fault) \
+ ST(st, dst, offset + 0x18, t6, bigchunk_fault) \
+ ST(st, dst, offset + 0x1c, t7, bigchunk_fault)
+
+/* left: g7 + (g1 % 128) - offset */
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- std %t0, [%dst + (offset) + 0x00]; \
- std %t2, [%dst + (offset) + 0x08]; \
- std %t4, [%dst + (offset) + 0x10]; \
- std %t6, [%dst + (offset) + 0x18];
+ LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+ LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+ LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+ LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+ ST(std, dst, offset + 0x00, t0, bigchunk_fault) \
+ ST(std, dst, offset + 0x08, t2, bigchunk_fault) \
+ ST(std, dst, offset + 0x10, t4, bigchunk_fault) \
+ ST(std, dst, offset + 0x18, t6, bigchunk_fault)
+ .section .fixup,#alloc,#execinstr
+bigchunk_fault:
+ sub %g7, %g5, %o0
+ and %g1, 127, %g1
+ retl
+ add %o0, %g1, %o0
+
+/* left: offset + 16 + (g1 % 16) */
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- st %t0, [%dst - (offset) - 0x10]; \
- st %t1, [%dst - (offset) - 0x0c]; \
- st %t2, [%dst - (offset) - 0x08]; \
- st %t3, [%dst - (offset) - 0x04];
+ LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault) \
+ LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x10), t0, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x08), t2, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)
-#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
- lduh [%src + (offset) + 0x00], %t0; \
- lduh [%src + (offset) + 0x02], %t1; \
- lduh [%src + (offset) + 0x04], %t2; \
- lduh [%src + (offset) + 0x06], %t3; \
- sth %t0, [%dst + (offset) + 0x00]; \
- sth %t1, [%dst + (offset) + 0x02]; \
- sth %t2, [%dst + (offset) + 0x04]; \
- sth %t3, [%dst + (offset) + 0x06];
+ .section .fixup,#alloc,#execinstr
+lastchunk_fault:
+ and %g1, 15, %g1
+ retl
+ sub %g1, %g5, %o0
+/* left: o3 + (o2 % 16) - offset */
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ LD(lduh, src, offset + 0x00, t0, halfchunk_fault) \
+ LD(lduh, src, offset + 0x02, t1, halfchunk_fault) \
+ LD(lduh, src, offset + 0x04, t2, halfchunk_fault) \
+ LD(lduh, src, offset + 0x06, t3, halfchunk_fault) \
+ ST(sth, dst, offset + 0x00, t0, halfchunk_fault) \
+ ST(sth, dst, offset + 0x02, t1, halfchunk_fault) \
+ ST(sth, dst, offset + 0x04, t2, halfchunk_fault) \
+ ST(sth, dst, offset + 0x06, t3, halfchunk_fault)
+
+/* left: o3 + (o2 % 16) + offset + 2 */
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - (offset) - 0x02], %t0; \
- ldub [%src - (offset) - 0x01], %t1; \
- stb %t0, [%dst - (offset) - 0x02]; \
- stb %t1, [%dst - (offset) - 0x01];
+ LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault) \
+ LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault) \
+ ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault) \
+ ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)
+
+ .section .fixup,#alloc,#execinstr
+halfchunk_fault:
+ and %o2, 15, %o2
+ sub %o3, %g5, %o3
+ retl
+ add %o2, %o3, %o0
+
+/* left: offset + 2 + (o2 % 2) */
+#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
+ LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault) \
+ LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault) \
+ ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault) \
+ ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)
+
+ .section .fixup,#alloc,#execinstr
+last_shortchunk_fault:
+ and %o2, 1, %o2
+ retl
+ sub %o2, %g5, %o0
.text
.align 4
@@ -182,8 +218,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
- EXT(5b, 80b, 50f)
subcc %g7, 128, %g7
add %o1, 128, %o1
bne 5b
@@ -201,7 +235,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
jmpl %o5 + %lo(copy_user_table_end), %g0
add %o0, %g7, %o0
-copy_user_table:
MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
@@ -210,7 +243,6 @@ copy_user_table:
MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
copy_user_table_end:
- EXT(copy_user_table, copy_user_table_end, 51f)
be copy_user_last7
andcc %g1, 4, %g0
@@ -250,8 +282,6 @@ ldd_std:
MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
- EXT(ldd_std, 81b, 52f)
subcc %g7, 128, %g7
add %o1, 128, %o1
bne ldd_std
@@ -290,8 +320,6 @@ cannot_optimize:
10:
MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
- EXT(10b, 82b, 53f)
subcc %o3, 0x10, %o3
add %o1, 0x10, %o1
bne 10b
@@ -308,8 +336,6 @@ byte_chunk:
MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
- EXT(byte_chunk, 83b, 54f)
subcc %o3, 0x10, %o3
add %o1, 0x10, %o1
bne byte_chunk
@@ -325,16 +351,14 @@ short_end:
add %o1, %o3, %o1
jmpl %o5 + %lo(short_table_end), %g0
andcc %o2, 1, %g0
-84:
- MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
short_table_end:
- EXT(84b, short_table_end, 55f)
be 1f
nop
EX(ldub [%o1], %g2, add %g0, 1)
@@ -363,123 +387,8 @@ short_aligned_end:
.section .fixup,#alloc,#execinstr
.align 4
97:
- mov %o2, %g3
-fixupretl:
retl
- mov %g3, %o0
-
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
- * o0 += (g2 / 12) * 32;
- */
- cmp %g2, 12
- add %o0, %g7, %o0
- bcs 1f
- cmp %g2, 24
- bcs 2f
- cmp %g2, 36
- bcs 3f
- nop
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-3: sub %g2, 12, %g2
- sub %g7, 32, %g7
-2: sub %g2, 12, %g2
- sub %g7, 32, %g7
-1: cmp %g2, 4
- bcs,a 60f
- clr %g2
- sub %g2, 4, %g2
- sll %g2, 2, %g2
-60: and %g1, 0x7f, %g3
- sub %o0, %g7, %o0
- add %g3, %g7, %g3
- ba fixupretl
- sub %g3, %g2, %g3
-51:
-/* i = 41 - g2; j = i % 6;
- * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
- * o0 -= (i / 6) * 16 + 16;
- */
- neg %g2
- and %g1, 0xf, %g1
- add %g2, 41, %g2
- add %o0, %g1, %o0
-1: cmp %g2, 6
- bcs,a 2f
- cmp %g2, 4
- add %g1, 16, %g1
- b 1b
- sub %g2, 6, %g2
-2: bcc,a 2f
- mov 16, %g2
- inc %g2
- sll %g2, 2, %g2
-2: add %g1, %g2, %g3
- ba fixupretl
- sub %o0, %g3, %o0
-52:
-/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
- o0 += (g2 / 8) * 32 */
- andn %g2, 7, %g4
- add %o0, %g7, %o0
- andcc %g2, 4, %g0
- and %g2, 3, %g2
- sll %g4, 2, %g4
- sll %g2, 3, %g2
- bne 60b
- sub %g7, %g4, %g7
- ba 60b
- clr %g2
-53:
-/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
- o0 += (g2 & 8) */
- and %g2, 3, %g4
- andcc %g2, 4, %g0
- and %g2, 8, %g2
- sll %g4, 1, %g4
- be 1f
- add %o0, %g2, %o0
- add %g2, %g4, %g2
-1: and %o2, 0xf, %g3
- add %g3, %o3, %g3
- ba fixupretl
- sub %g3, %g2, %g3
-54:
-/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
- o0 += (g2 / 4) * 2 */
- srl %g2, 2, %o4
- and %g2, 1, %o5
- srl %g2, 1, %g2
- add %o4, %o4, %o4
- and %o5, %g2, %o5
- and %o2, 0xf, %o2
- add %o0, %o4, %o0
- sub %o3, %o5, %o3
- sub %o2, %o4, %o2
- ba fixupretl
- add %o2, %o3, %g3
-55:
-/* i = 27 - g2;
- g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
- o0 -= i / 4 * 2 + 1 */
- neg %g2
- and %o2, 1, %o2
- add %g2, 27, %g2
- srl %g2, 2, %o5
- andcc %g2, 3, %g0
- mov 1, %g2
- add %o5, %o5, %o5
- be,a 1f
- clr %g2
-1: add %g2, %o5, %g3
- sub %o0, %g3, %o0
- ba fixupretl
- add %g3, %o2, %g3
+ mov %o2, %o0
.globl __copy_user_end
__copy_user_end:
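
Instead of reconstructing progress from (fault_pc - loop_start) / 4 — the deleted 50:/51:/52:/53:/54:/55: arithmetic — each LD/ST site now records its own offset in %g5 and branches to a shared per-chunk handler. A C model of bigchunk_fault, matching the "left: g7 + (g1 % 128) - offset" comment in the macros (register names kept as variables; a sketch, not kernel code):

/* Bytes not yet copied when a MOVE_BIGCHUNK access faults:
 *   g7 = bytes remaining in the 128-byte main loop
 *   g1 = total remaining length (low 7 bits = tail after the loop)
 *   g5 = offset of the faulting access inside the current block */
static unsigned long bigchunk_fault_model(unsigned long g7,
					  unsigned long g1,
					  unsigned long g5)
{
	return (g7 - g5) + (g1 & 127);
}
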
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index 26c644ba3ecb..f968e83bc93b 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -4,7 +4,7 @@
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
-#include <asm/export.h>
+#include <linux/export.h>
#ifdef __KERNEL__
#define GLOBAL_SPARE %g7
@@ -68,9 +68,10 @@
.globl FUNC_NAME
.type FUNC_NAME,#function
EXPORT_SYMBOL(FUNC_NAME)
-FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
+FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
+ mov -1, %o3
clr %o4
andcc %g1, 0x3, %g0
bne,pn %icc, 95f
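
Seeding the running sum with -1 (the new "mov -1, %o3") means a successful csum_and_copy_*() can never return 0, which frees 0 to mean "the access faulted" — exactly what the reworked fixups in the two user-copy wrappers below now return. A hedged sketch of a caller under this convention:

/* Post-rework convention: a zero result <=> the user copy faulted.
 * (Sketch only; error signalling applies to the _from_user/_to_user
 * entry points defined below.) */
__wsum sum = csum_and_copy_from_user(src, dst, len);
if (!sum)
	return -EFAULT;
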
diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S
index d20b9594f0c7..b0ba8d4dd439 100644
--- a/arch/sparc/lib/csum_copy_from_user.S
+++ b/arch/sparc/lib/csum_copy_from_user.S
@@ -9,14 +9,14 @@
.section .fixup, "ax"; \
.align 4; \
99: retl; \
- mov -1, %o0; \
+ mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define FUNC_NAME __csum_partial_copy_from_user
+#define FUNC_NAME csum_and_copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#include "csum_copy.S"
diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S
index d71c0c81e8ab..91ba36dbf7d2 100644
--- a/arch/sparc/lib/csum_copy_to_user.S
+++ b/arch/sparc/lib/csum_copy_to_user.S
@@ -9,14 +9,14 @@
.section .fixup,"ax"; \
.align 4; \
99: retl; \
- mov -1, %o0; \
+ mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define FUNC_NAME __csum_partial_copy_to_user
+#define FUNC_NAME csum_and_copy_to_user
#define STORE(type,src,addr) type##a src, [addr] %asi
#include "csum_copy.S"
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
index a7389409d9fa..4ba901acd572 100644
--- a/arch/sparc/lib/divdi3.S
+++ b/arch/sparc/lib/divdi3.S
@@ -5,7 +5,7 @@ This file is part of GNU CC.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.globl __divdi3
diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S
index 5a11d864fa05..3a9ad8ffdfe8 100644
--- a/arch/sparc/lib/ffs.S
+++ b/arch/sparc/lib/ffs.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.register %g2,#scratch
diff --git a/arch/sparc/lib/fls.S b/arch/sparc/lib/fls.S
index 06b8d300bcae..ccf97fb7d8cd 100644
--- a/arch/sparc/lib/fls.S
+++ b/arch/sparc/lib/fls.S
@@ -5,8 +5,8 @@
* and onward.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.register %g2, #scratch
diff --git a/arch/sparc/lib/fls64.S b/arch/sparc/lib/fls64.S
index c83e22ae9586..87005b67d378 100644
--- a/arch/sparc/lib/fls64.S
+++ b/arch/sparc/lib/fls64.S
@@ -5,8 +5,8 @@
* and onward.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.register %g2, #scratch
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index 0ddbbb031822..eebee59b0655 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.align 32
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
index c9da9f139694..f3a8cd491ce0 100644
--- a/arch/sparc/lib/iomap.c
+++ b/arch/sparc/lib/iomap.c
@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
/* nothing to do */
}
EXPORT_SYMBOL(pci_iounmap);
+#endif
diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S
index 531d89c9d5d9..7fa8fd4b795a 100644
--- a/arch/sparc/lib/ipcsum.S
+++ b/arch/sparc/lib/ipcsum.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S
index 9a1289a3fb28..47a39f4384a2 100644
--- a/arch/sparc/lib/locks.S
+++ b/arch/sparc/lib/locks.S
@@ -7,11 +7,11 @@
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/spinlock.h>
-#include <asm/export.h>
.text
.align 4
diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S
index 509ca6682da8..09bf581a0ba5 100644
--- a/arch/sparc/lib/lshrdi3.S
+++ b/arch/sparc/lib/lshrdi3.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
ENTRY(__lshrdi3)
cmp %o2, 0
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index deba6fa0bc78..f7f7910eb41e 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -6,8 +6,8 @@
* This can also be tweaked for kernel stack overflow detection.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
/*
* This is the main variant and is called by C code. GCC's -pg option
diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S
index a18076ef5af1..c87e8000feba 100644
--- a/arch/sparc/lib/memcmp.S
+++ b/arch/sparc/lib/memcmp.S
@@ -5,9 +5,9 @@
* Copyright (C) 2000, 2008 David S. Miller (davem@davemloft.net)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asm.h>
-#include <asm/export.h>
.text
ENTRY(memcmp)
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index ee823d8c9215..57b1ae0f5924 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -8,7 +8,8 @@
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#include <asm/export.h>
+#include <linux/export.h>
+
#define FUNC(x) \
.globl x; \
.type x,@function; \
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index 3132b6316144..543dda7b9dac 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -5,8 +5,8 @@
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
diff --git a/arch/sparc/lib/memscan_32.S b/arch/sparc/lib/memscan_32.S
index c4c2d5b3a2e9..5386a3a20019 100644
--- a/arch/sparc/lib/memscan_32.S
+++ b/arch/sparc/lib/memscan_32.S
@@ -5,7 +5,7 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* In essence, this is just a fancy strlen. */
diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S
index 36dd638905c3..70a4f21057f2 100644
--- a/arch/sparc/lib/memscan_64.S
+++ b/arch/sparc/lib/memscan_64.S
@@ -6,7 +6,7 @@
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*/
- #include <asm/export.h>
+#include <linux/export.h>
#define HI_MAGIC 0x8080808080808080
#define LO_MAGIC 0x0101010101010101
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index b89d42b29e34..a33419dbb464 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -9,8 +9,8 @@
* clear_user.
*/
+#include <linux/export.h>
#include <asm/ptrace.h>
-#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
@@ -19,7 +19,7 @@
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
-99: ba 30f; \
+99: retl; \
a, b, %o0; \
.section __ex_table,ALLOC; \
.align 4; \
@@ -27,35 +27,44 @@
.text; \
.align 4
-#define EXT(start,end,handler) \
+#define STORE(source, base, offset, n) \
+98: std source, [base + offset + n]; \
+ .section .fixup,ALLOC,EXECINSTR; \
+ .align 4; \
+99: ba 30f; \
+ sub %o3, n - offset, %o3; \
.section __ex_table,ALLOC; \
.align 4; \
- .word start, 0, end, handler; \
+ .word 98b, 99b; \
.text; \
- .align 4
+ .align 4;
+
+#define STORE_LAST(source, base, offset, n) \
+ EX(std source, [base - offset - n], \
+ add %o1, offset + n);
/* Please don't change these macros, unless you change the logic
* in the .fixup section below as well.
* Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
-#define ZERO_BIG_BLOCK(base, offset, source) \
- std source, [base + offset + 0x00]; \
- std source, [base + offset + 0x08]; \
- std source, [base + offset + 0x10]; \
- std source, [base + offset + 0x18]; \
- std source, [base + offset + 0x20]; \
- std source, [base + offset + 0x28]; \
- std source, [base + offset + 0x30]; \
- std source, [base + offset + 0x38];
+#define ZERO_BIG_BLOCK(base, offset, source) \
+ STORE(source, base, offset, 0x00); \
+ STORE(source, base, offset, 0x08); \
+ STORE(source, base, offset, 0x10); \
+ STORE(source, base, offset, 0x18); \
+ STORE(source, base, offset, 0x20); \
+ STORE(source, base, offset, 0x28); \
+ STORE(source, base, offset, 0x30); \
+ STORE(source, base, offset, 0x38);
#define ZERO_LAST_BLOCKS(base, offset, source) \
- std source, [base - offset - 0x38]; \
- std source, [base - offset - 0x30]; \
- std source, [base - offset - 0x28]; \
- std source, [base - offset - 0x20]; \
- std source, [base - offset - 0x18]; \
- std source, [base - offset - 0x10]; \
- std source, [base - offset - 0x08]; \
- std source, [base - offset - 0x00];
+ STORE_LAST(source, base, offset, 0x38); \
+ STORE_LAST(source, base, offset, 0x30); \
+ STORE_LAST(source, base, offset, 0x28); \
+ STORE_LAST(source, base, offset, 0x20); \
+ STORE_LAST(source, base, offset, 0x18); \
+ STORE_LAST(source, base, offset, 0x10); \
+ STORE_LAST(source, base, offset, 0x08); \
+ STORE_LAST(source, base, offset, 0x00);
.text
.align 4
@@ -68,8 +77,6 @@ __bzero_begin:
.globl memset
EXPORT_SYMBOL(__bzero)
EXPORT_SYMBOL(memset)
- .globl __memset_start, __memset_end
-__memset_start:
memset:
mov %o0, %g1
mov 1, %g4
@@ -122,8 +129,6 @@ __bzero:
ZERO_BIG_BLOCK(%o0, 0x00, %g2)
subcc %o3, 128, %o3
ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
- EXT(10b, 11b, 20f)
bne 10b
add %o0, 128, %o0
@@ -138,7 +143,6 @@ __bzero:
jmp %o4
add %o0, %o2, %o0
-12:
ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
@@ -181,37 +185,13 @@ __bzero:
5:
retl
clr %o0
-__memset_end:
.section .fixup,#alloc,#execinstr
.align 4
-20:
- cmp %g2, 8
- bleu 1f
- and %o1, 0x7f, %o1
- sub %g2, 9, %g2
- add %o3, 64, %o3
-1:
- sll %g2, 3, %g2
- add %o3, %o1, %o0
- b 30f
- sub %o0, %g2, %o0
-21:
- mov 8, %o0
- and %o1, 7, %o1
- sub %o0, %g2, %o0
- sll %o0, 3, %o0
- b 30f
- add %o0, %o1, %o0
30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
- save %sp, -104, %sp
- mov %i5, %o0
- mov %i7, %o1
- call lookup_fault
- mov %i4, %o2
- ret
- restore
+ and %o1, 0x7f, %o1
+ retl
+ add %o3, %o1, %o0
.globl __bzero_end
__bzero_end:
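
memset.S gets the same treatment as copy_user.S: each STORE() fixup rewinds %o3 by the faulting store's offset into its 128-byte iteration, and the shared tail at 30: adds back the sub-128-byte remainder held in the low bits of %o1. A C model of that final computation (a sketch of the arithmetic only):

/* Return value of a faulting __bzero/memset: bytes not cleared.
 *   o3 = count remaining in the 128-byte loop, pre-adjusted by the
 *        STORE() fixups; o1's low 7 bits = the tail below 128 bytes */
static unsigned long bzero_fault_model(unsigned long o3, unsigned long o1)
{
	return o3 + (o1 & 0x7f);
}
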
diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S
index 53054dee66d6..7e1e8cd30a22 100644
--- a/arch/sparc/lib/muldi3.S
+++ b/arch/sparc/lib/muldi3.S
@@ -5,7 +5,7 @@ This file is part of GNU CC.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.globl __muldi3
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
index 2f187b299345..5bb4c122a2cf 100644
--- a/arch/sparc/lib/multi3.S
+++ b/arch/sparc/lib/multi3.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.align 4
diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S
index dd111bbad5df..27478b3f1647 100644
--- a/arch/sparc/lib/strlen.S
+++ b/arch/sparc/lib/strlen.S
@@ -6,9 +6,9 @@
* Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asm.h>
-#include <asm/export.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S
index 794733f036b6..387bbf621548 100644
--- a/arch/sparc/lib/strncmp_32.S
+++ b/arch/sparc/lib/strncmp_32.S
@@ -4,8 +4,8 @@
* generic strncmp routine.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(strncmp)
diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S
index 3d37d65f674c..76c1207ecf5a 100644
--- a/arch/sparc/lib/strncmp_64.S
+++ b/arch/sparc/lib/strncmp_64.S
@@ -5,9 +5,9 @@
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
-#include <asm/export.h>
.text
ENTRY(strncmp)
diff --git a/arch/sparc/lib/ucmpdi2.c b/arch/sparc/lib/ucmpdi2.c
deleted file mode 100644
index 82c1cccb1264..000000000000
--- a/arch/sparc/lib/ucmpdi2.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include "libgcc.h"
-
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
-{
- const DWunion au = {.ll = a};
- const DWunion bu = {.ll = b};
-
- if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
- return 0;
- else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
- return 2;
- if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
- return 0;
- else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
- return 2;
- return 1;
-}
-EXPORT_SYMBOL(__ucmpdi2);
diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S
index f6af7c7ee6fc..35461e3b2a9b 100644
--- a/arch/sparc/lib/xor.S
+++ b/arch/sparc/lib/xor.S
@@ -9,12 +9,12 @@
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-#include <asm/export.h>
/*
* Requirements: