Diffstat (limited to 'arch/arm/lib')
-rw-r--r--  arch/arm/lib/Makefile                  |   6
-rw-r--r--  arch/arm/lib/backtrace-clang.S         |  25
-rw-r--r--  arch/arm/lib/backtrace.S               |  24
-rw-r--r--  arch/arm/lib/bitops.h                  |  14
-rw-r--r--  arch/arm/lib/call_with_stack.S         |  35
-rw-r--r--  arch/arm/lib/copy_from_user.S          |  18
-rw-r--r--  arch/arm/lib/copy_template.S           |  67
-rw-r--r--  arch/arm/lib/copy_to_user.S            |  16
-rw-r--r--  arch/arm/lib/csumpartialcopy.S         |   4
-rw-r--r--  arch/arm/lib/csumpartialcopygeneric.S  |   1
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S     |  26
-rw-r--r--  arch/arm/lib/delay-loop.S              |   4
-rw-r--r--  arch/arm/lib/error-inject.c            |  10
-rw-r--r--  arch/arm/lib/findbit.S                 | 232
-rw-r--r--  arch/arm/lib/memcpy.S                  |  17
-rw-r--r--  arch/arm/lib/memmove.S                 |  66
-rw-r--r--  arch/arm/lib/memset.S                  |  12
-rw-r--r--  arch/arm/lib/testchangebit.S           |   4
-rw-r--r--  arch/arm/lib/testclearbit.S            |   4
-rw-r--r--  arch/arm/lib/testsetbit.S              |   4
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c     |  42
-rw-r--r--  arch/arm/lib/xor-neon.c                |  12
22 files changed, 300 insertions, 343 deletions
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 6d2ba454f25b..650404be6768 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -36,10 +36,6 @@ else
lib-y += io-readsw-armv4.o io-writesw-armv4.o
endif
-ifeq ($(CONFIG_ARCH_RPC),y)
- AFLAGS_delay-loop.o += -march=armv4
-endif
-
$(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
@@ -48,3 +44,5 @@ ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
CFLAGS_xor-neon.o += $(NEON_FLAGS)
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
endif
+
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S
index 2ff375144b55..290c52a60fc6 100644
--- a/arch/arm/lib/backtrace-clang.S
+++ b/arch/arm/lib/backtrace-clang.S
@@ -17,6 +17,7 @@
#define sv_pc r6
#define mask r7
#define sv_lr r8
+#define loglvl r9
ENTRY(c_backtrace)
@@ -99,6 +100,7 @@ ENDPROC(c_backtrace)
@ to ensure 8 byte alignment
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
moveq mask, #0xfc000003
movne mask, #0 @ mask for 32-bit
@@ -142,7 +144,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
*/
1003: ldr sv_lr, [sv_fp, #4] @ get saved lr from next frame
- ldr r0, [sv_lr, #-4] @ get call instruction
+1004: ldr r0, [sv_lr, #-4] @ get call instruction
ldr r3, .Lopcode+4
and r2, r3, r0 @ is this a bl call
teq r2, r3
@@ -162,11 +164,12 @@ finished_setup:
/*
* Print the function (sv_pc) and where it was called from (sv_lr).
*/
-1004: mov r0, sv_pc
+ mov r0, sv_pc
mov r1, sv_lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
+ mov r3, loglvl
bl dump_backtrace_entry
/*
@@ -183,6 +186,7 @@ finished_setup:
ldr r0, [frame] @ locals are stored in
@ the preceding frame
subeq r0, r0, #4
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
/*
@@ -193,11 +197,20 @@ finished_setup:
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
+#ifdef CONFIG_IRQSTACKS
+ @
+ @ Kernel stacks may be discontiguous in memory. If the next
+ @ frame is below the previous frame, accept it as long as it
+ @ lives in kernel memory.
+ @
+ cmpls sv_fp, #PAGE_OFFSET
+#endif
bhi for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
- bl printk
+ mov r1, loglvl
+ mov r2, frame
+ bl _printk
no_frame: ldmfd sp!, {r4 - r9, fp, pc}
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
@@ -205,11 +218,11 @@ ENDPROC(c_backtrace)
.long 1001b, 1006b
.long 1002b, 1006b
.long 1003b, 1006b
- .long 1004b, 1006b
+ .long 1004b, finished_setup
.long 1005b, 1006b
.popsection
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr}
.word 0x0b000000 @ bl if these bits are set
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 582925238d65..293a2716bd20 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -18,6 +18,7 @@
#define sv_pc r6
#define mask r7
#define offset r8
+#define loglvl r9
ENTRY(c_backtrace)
@@ -25,9 +26,10 @@ ENTRY(c_backtrace)
ret lr
ENDPROC(c_backtrace)
#else
- stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
+ stmfd sp!, {r4 - r9, lr} @ Save an extra register so we have a location...
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
ARM( moveq mask, #0xfc000003 )
@@ -73,6 +75,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
ldr r1, [frame, #-4] @ get saved lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
+ mov r3, loglvl
bl dump_backtrace_entry
ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
@@ -80,12 +83,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
@@ -93,12 +98,21 @@ for_each_frame: tst frame, mask @ Check for address exceptions
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
+#ifdef CONFIG_IRQSTACKS
+ @
+ @ Kernel stacks may be discontiguous in memory. If the next
+ @ frame is below the previous frame, accept it as long as it
+ @ lives in kernel memory.
+ @
+ cmpls sv_fp, #PAGE_OFFSET
+#endif
bhi for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
- bl printk
-no_frame: ldmfd sp!, {r4 - r8, pc}
+ mov r1, loglvl
+ mov r2, frame
+ bl _printk
+no_frame: ldmfd sp!, {r4 - r9, pc}
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
@@ -109,7 +123,7 @@ ENDPROC(c_backtrace)
.long 1004b, 1006b
.popsection
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
.word 0xe92d0000 >> 11 @ stmfd sp!, {}
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 95bd35991288..f069d1b2318e 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -28,7 +28,7 @@ UNWIND( .fnend )
ENDPROC(\name )
.endm
- .macro testop, name, instr, store
+ .macro __testop, name, instr, store, barrier
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND( .fnstart )
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask
- smp_dmb
+ \barrier
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
.arch_extension mp
ALT_SMP(W(pldw) [r1])
@@ -50,13 +50,21 @@ UNWIND( .fnstart )
strex ip, r2, [r1]
cmp ip, #0
bne 1b
- smp_dmb
+ \barrier
cmp r0, #0
movne r0, #1
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
.endm
+
+ .macro testop, name, instr, store
+ __testop \name, \instr, \store, smp_dmb
+ .endm
+
+ .macro sync_testop, name, instr, store
+ __testop \name, \instr, \store, __smp_dmb
+ .endm
#else
.macro bitop, name, instr
ENTRY( \name )
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
index 28b0341ae786..5030d4e8d126 100644
--- a/arch/arm/lib/call_with_stack.S
+++ b/arch/arm/lib/call_with_stack.S
@@ -8,25 +8,44 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
/*
* void call_with_stack(void (*fn)(void *), void *arg, void *sp)
*
* Change the stack to that pointed at by sp, then invoke fn(arg) with
* the new stack.
+ *
+ * The sequence below follows the APCS frame convention for frame pointer
+ * unwinding, and implements the unwinder annotations needed by the EABI
+ * unwinder.
*/
-ENTRY(call_with_stack)
- str sp, [r2, #-4]!
- str lr, [r2, #-4]!
+ENTRY(call_with_stack)
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+ mov ip, sp
+ push {fp, ip, lr, pc}
+ sub fp, ip, #4
+#else
+UNWIND( .fnstart )
+UNWIND( .save {fpreg, lr} )
+ push {fpreg, lr}
+UNWIND( .setfp fpreg, sp )
+ mov fpreg, sp
+#endif
mov sp, r2
mov r2, r0
mov r0, r1
- badr lr, 1f
- ret r2
+ bl_r r2
-1: ldr lr, [sp]
- ldr sp, [sp, #4]
- ret lr
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+ ldmdb fp, {fp, sp, pc}
+#else
+ mov sp, fpreg
+ pop {fpreg, pc}
+UNWIND( .fnend )
+#endif
+ .globl call_with_stack_end
+call_with_stack_end:
ENDPROC(call_with_stack)
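
The comment block above documents call_with_stack(fn, arg, sp): switch to the stack pointed at by sp, run fn(arg), then return on the original stack. A minimal usage sketch in kernel C follows; the names and the 4 KiB buffer size are illustrative only, and the usual kernel headers are assumed.

    /* ARM stacks grow down, so pass the top of the buffer as sp. */
    static u8 private_stack[4096] __attribute__((aligned(8)));

    static void on_private_stack(void *arg)
    {
            pr_info("running with sp inside private_stack[], arg=%p\n", arg);
    }

    static void example(void)
    {
            call_with_stack(on_private_stack, NULL,
                            private_stack + sizeof(private_stack));
    }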
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 95b2e1ce559c..270de7debd0f 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -91,26 +91,22 @@
strb\cond \reg, [\ptr], #1
.endm
- .macro enter reg1 reg2
+ .macro enter regs:vararg
mov r3, #0
- stmdb sp!, {r0, r2, r3, \reg1, \reg2}
+UNWIND( .save {r0, r2, r3, \regs} )
+ stmdb sp!, {r0, r2, r3, \regs}
.endm
- .macro usave reg1 reg2
- UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
- .endm
-
- .macro exit reg1 reg2
+ .macro exit regs:vararg
add sp, sp, #8
- ldmfd sp!, {r0, \reg1, \reg2}
+ ldmfd sp!, {r0, \regs}
.endm
.text
ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
- get_thread_info r3
- ldr r3, [r3, #TI_ADDR_LIMIT]
+ ldr r3, =TASK_SIZE
uaccess_mask_range_ptr r1, r2, r3, ip
#endif
@@ -118,7 +114,7 @@ ENTRY(arm_copy_from_user)
ENDPROC(arm_copy_from_user)
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
.align 0
copy_abort_preamble
ldmfd sp!, {r1, r2, r3}
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 810a805d36dc..8fbafb074fe9 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -69,13 +69,10 @@
* than one 32bit instruction in Thumb-2)
*/
-
- UNWIND( .fnstart )
- enter r4, lr
- UNWIND( .fnend )
-
UNWIND( .fnstart )
- usave r4, lr @ in first stmdb block
+ enter r4, UNWIND(fpreg,) lr
+ UNWIND( .setfp fpreg, sp )
+ UNWIND( mov fpreg, sp )
subs r2, r2, #4
blt 8f
@@ -86,12 +83,7 @@
bne 10f
1: subs r2, r2, #(28)
- stmfd sp!, {r5 - r8}
- UNWIND( .fnend )
-
- UNWIND( .fnstart )
- usave r4, lr
- UNWIND( .save {r5 - r8} ) @ in second stmfd block
+ stmfd sp!, {r5, r6, r8, r9}
blt 5f
CALGN( ands ip, r0, #31 )
@@ -110,9 +102,9 @@
PLD( pld [r1, #92] )
3: PLD( pld [r1, #124] )
-4: ldr8w r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
+4: ldr8w r1, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
subs r2, r2, #32
- str8w r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
+ str8w r0, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
@@ -132,8 +124,8 @@
ldr1w r1, r4, abort=20f
ldr1w r1, r5, abort=20f
ldr1w r1, r6, abort=20f
- ldr1w r1, r7, abort=20f
ldr1w r1, r8, abort=20f
+ ldr1w r1, r9, abort=20f
ldr1w r1, lr, abort=20f
#if LDR1W_SHIFT < STR1W_SHIFT
@@ -150,17 +142,14 @@
str1w r0, r4, abort=20f
str1w r0, r5, abort=20f
str1w r0, r6, abort=20f
- str1w r0, r7, abort=20f
str1w r0, r8, abort=20f
+ str1w r0, r9, abort=20f
str1w r0, lr, abort=20f
CALGN( bcs 2b )
-7: ldmfd sp!, {r5 - r8}
- UNWIND( .fnend ) @ end of second stmfd block
+7: ldmfd sp!, {r5, r6, r8, r9}
- UNWIND( .fnstart )
- usave r4, lr @ still in first stmdb block
8: movs r2, r2, lsl #31
ldr1b r1, r3, ne, abort=21f
ldr1b r1, r4, cs, abort=21f
@@ -169,7 +158,7 @@
str1b r0, r4, cs, abort=21f
str1b r0, ip, cs, abort=21f
- exit r4, pc
+ exit r4, UNWIND(fpreg,) pc
9: rsb ip, ip, #4
cmp ip, #2
@@ -189,13 +178,10 @@
ldr1w r1, lr, abort=21f
beq 17f
bgt 18f
- UNWIND( .fnend )
.macro forward_copy_shift pull push
- UNWIND( .fnstart )
- usave r4, lr @ still in first stmdb block
subs r2, r2, #28
blt 14f
@@ -205,12 +191,8 @@
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
-11: stmfd sp!, {r5 - r9}
- UNWIND( .fnend )
+11: stmfd sp!, {r5, r6, r8 - r10}
- UNWIND( .fnstart )
- usave r4, lr
- UNWIND( .save {r5 - r9} ) @ in new second stmfd block
PLD( pld [r1, #0] )
PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
@@ -219,35 +201,32 @@
PLD( pld [r1, #92] )
12: PLD( pld [r1, #124] )
-13: ldr4w r1, r4, r5, r6, r7, abort=19f
+13: ldr4w r1, r4, r5, r6, r8, abort=19f
mov r3, lr, lspull #\pull
subs r2, r2, #32
- ldr4w r1, r8, r9, ip, lr, abort=19f
+ ldr4w r1, r9, r10, ip, lr, abort=19f
orr r3, r3, r4, lspush #\push
mov r4, r4, lspull #\pull
orr r4, r4, r5, lspush #\push
mov r5, r5, lspull #\pull
orr r5, r5, r6, lspush #\push
mov r6, r6, lspull #\pull
- orr r6, r6, r7, lspush #\push
- mov r7, r7, lspull #\pull
- orr r7, r7, r8, lspush #\push
+ orr r6, r6, r8, lspush #\push
mov r8, r8, lspull #\pull
orr r8, r8, r9, lspush #\push
mov r9, r9, lspull #\pull
- orr r9, r9, ip, lspush #\push
+ orr r9, r9, r10, lspush #\push
+ mov r10, r10, lspull #\pull
+ orr r10, r10, ip, lspush #\push
mov ip, ip, lspull #\pull
orr ip, ip, lr, lspush #\push
- str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, abort=19f
+ str8w r0, r3, r4, r5, r6, r8, r9, r10, ip, abort=19f
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
- ldmfd sp!, {r5 - r9}
- UNWIND( .fnend ) @ end of the second stmfd block
+ ldmfd sp!, {r5, r6, r8 - r10}
- UNWIND( .fnstart )
- usave r4, lr @ still in first stmdb block
14: ands ip, r2, #28
beq 16f
@@ -262,7 +241,6 @@
16: sub r1, r1, #(\push / 8)
b 8b
- UNWIND( .fnend )
.endm
@@ -273,6 +251,7 @@
18: forward_copy_shift pull=24 push=8
+ UNWIND( .fnend )
/*
* Abort preamble and completion macros.
@@ -282,13 +261,13 @@
*/
.macro copy_abort_preamble
-19: ldmfd sp!, {r5 - r9}
+19: ldmfd sp!, {r5, r6, r8 - r10}
b 21f
-20: ldmfd sp!, {r5 - r8}
+20: ldmfd sp!, {r5, r6, r8, r9}
21:
.endm
.macro copy_abort_end
- ldmfd sp!, {r4, pc}
+ ldmfd sp!, {r4, UNWIND(fpreg,) pc}
.endm
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index ebfe4cb3d912..fac49e57cc0b 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -90,18 +90,15 @@
strusr \reg, \ptr, 1, \cond, abort=\abort
.endm
- .macro enter reg1 reg2
+ .macro enter regs:vararg
mov r3, #0
- stmdb sp!, {r0, r2, r3, \reg1, \reg2}
+UNWIND( .save {r0, r2, r3, \regs} )
+ stmdb sp!, {r0, r2, r3, \regs}
.endm
- .macro usave reg1 reg2
- UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
- .endm
-
- .macro exit reg1 reg2
+ .macro exit regs:vararg
add sp, sp, #8
- ldmfd sp!, {r0, \reg1, \reg2}
+ ldmfd sp!, {r0, \regs}
.endm
.text
@@ -109,8 +106,7 @@
ENTRY(__copy_to_user_std)
WEAK(arm_copy_to_user)
#ifdef CONFIG_CPU_SPECTRE
- get_thread_info r3
- ldr r3, [r3, #TI_ADDR_LIMIT]
+ ldr r3, =TASK_SIZE
uaccess_mask_range_ptr r0, r2, r3, ip
#endif
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 184d97254a7a..1ca6aadd649c 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -9,8 +9,8 @@
.text
-/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum)
- * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum
+/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len)
+ * Params : r0 = src, r1 = dst, r2 = len
* Returns : r0 = new checksum
*/
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 0b706a39a677..0fd5c10e90a7 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -86,6 +86,7 @@ sum .req r3
FN_ENTRY
save_regs
+ mov sum, #-1
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6bd3a93eaa3c..6928781e6bee 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -62,9 +62,9 @@
/*
* unsigned int
- * csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr)
- * r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr
- * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ * csum_partial_copy_from_user(const char *src, char *dst, int len)
+ * r0 = src, r1 = dst, r2 = len
+ * Returns : r0 = checksum or 0
*/
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
@@ -73,25 +73,11 @@
#include "csumpartialcopygeneric.S"
/*
- * FIXME: minor buglet here
- * We don't return the checksum for the data present in the buffer. To do
- * so properly, we would have to add in whatever registers were loaded before
- * the fault, which, with the current asm above is not predictable.
+ * We report fault by returning 0 csum - impossible in normal case, since
+ * we start with 0xffffffff for initial sum.
*/
.pushsection .text.fixup,"ax"
.align 4
-9001: mov r4, #-EFAULT
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r5, [sp, #9*4] @ *err_ptr
-#else
- ldr r5, [sp, #8*4] @ *err_ptr
-#endif
- str r4, [r5]
- ldmia sp, {r1, r2} @ retrieve dst, len
- add r2, r2, r1
- mov r0, #0 @ zero the buffer
-9002: teq r2, r1
- strbne r0, [r1], #1
- bne 9002b
+9001: mov r0, #0
load_regs
.popsection
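
Because csumpartialcopygeneric.S now seeds the sum with 0xffffffff ("mov sum, #-1"), a genuine checksum can never fold to 0, so the fixup path above reports a faulting user access simply by returning 0. A hedged sketch of the resulting calling convention, using a made-up wrapper name and the prototype shown in the header comment:

    static int copy_from_user_with_csum(void *dst, const void __user *src,
                                        int len, unsigned int *csum)
    {
            unsigned int sum = csum_partial_copy_from_user(src, dst, len);

            if (!sum)               /* 0 is the fault indicator, never a real csum */
                    return -EFAULT;
            *csum = sum;
            return 0;
    }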
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 3ccade0f8130..3ac05177d097 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -8,6 +8,10 @@
#include <asm/assembler.h>
#include <asm/delay.h>
+#ifdef CONFIG_ARCH_RPC
+ .arch armv4
+#endif
+
.text
.LC0: .word loops_per_jiffy
diff --git a/arch/arm/lib/error-inject.c b/arch/arm/lib/error-inject.c
new file mode 100644
index 000000000000..5a5b405792ba
--- /dev/null
+++ b/arch/arm/lib/error-inject.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+
+void override_function_with_return(struct pt_regs *regs)
+{
+ instruction_pointer_set(regs, regs->ARM_lr);
+}
+NOKPROBE_SYMBOL(override_function_with_return);
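
override_function_with_return() is the hook the error-injection framework uses to force an instrumented function to return early; on ARM that is simply "set pc to the saved lr". A hedged usage sketch (my_driver_prepare is a made-up name) of how a function typically opts in with ALLOW_ERROR_INJECTION() from <linux/error-injection.h>:

    #include <linux/error-injection.h>

    static int my_driver_prepare(void)
    {
            /* normal work that can legitimately fail with an errno */
            return 0;
    }
    /* mark the function as a valid target for injected ERRNO returns */
    ALLOW_ERROR_INJECTION(my_driver_prepare, ERRNO);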
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index b5e8b9ae4c7d..b7ac2d3c0748 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -12,182 +12,128 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
.text
-/*
- * Purpose : Find a 'zero' bit
- * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
- */
-ENTRY(_find_first_zero_bit_le)
- teq r1, #0
- beq 3f
- mov r2, #0
-1:
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eors r3, r3, #0xff @ invert bits
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
-2: cmp r2, r1 @ any more?
- blo 1b
-3: mov r0, r1 @ no free bits
- ret lr
-ENDPROC(_find_first_zero_bit_le)
+#ifdef __ARMEB__
+#define SWAB_ENDIAN le
+#else
+#define SWAB_ENDIAN be
+#endif
-/*
- * Purpose : Find next 'zero' bit
- * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
- */
-ENTRY(_find_next_zero_bit_le)
+ .macro find_first, endian, set, name
+ENTRY(_find_first_\name\()bit_\endian)
+ UNWIND( .fnstart)
teq r1, #0
- beq 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eor r3, r3, #0xff @ now looking for a 1 bit
- movs r3, r3, lsr ip @ shift off unused bits
- bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
- add r2, r2, #1 @ align bit pointer
- b 2b @ loop for next bit
-ENDPROC(_find_next_zero_bit_le)
-
-/*
- * Purpose : Find a 'one' bit
- * Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
- */
-ENTRY(_find_first_bit_le)
- teq r1, #0
beq 3f
mov r2, #0
-1:
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
- movs r3, r3
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
+1: ldr r3, [r0], #4
+ .ifeq \set
+ mvns r3, r3 @ invert/test bits
+ .else
+ movs r3, r3 @ test bits
+ .endif
+ .ifc \endian, SWAB_ENDIAN
+ bne .L_found_swab
+ .else
+ bne .L_found @ found the bit?
+ .endif
+ add r2, r2, #32 @ next index
2: cmp r2, r1 @ any more?
blo 1b
-3: mov r0, r1 @ no free bits
+3: mov r0, r1 @ no more bits
ret lr
-ENDPROC(_find_first_bit_le)
+ UNWIND( .fnend)
+ENDPROC(_find_first_\name\()bit_\endian)
+ .endm
-/*
- * Purpose : Find next 'one' bit
- * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
- */
-ENTRY(_find_next_bit_le)
- teq r1, #0
- beq 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
+ .macro find_next, endian, set, name
+ENTRY(_find_next_\name\()bit_\endian)
+ UNWIND( .fnstart)
+ cmp r2, r1
+ bhs 3b
+ mov ip, r2, lsr #5 @ word index
+ add r0, r0, ip, lsl #2
+ ands ip, r2, #31 @ bit position
+ beq 1b
+ ldr r3, [r0], #4
+ .ifeq \set
+ mvn r3, r3 @ invert bits
+ .endif
+ .ifc \endian, SWAB_ENDIAN
+ rev_l r3, ip
+ .if .Lrev_l_uses_tmp
+ @ we need to recompute ip because rev_l will have overwritten
+ @ it.
+ and ip, r2, #31 @ bit position
+ .endif
+ .endif
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
+ orr r2, r2, #31 @ no zero bits
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
-ENDPROC(_find_next_bit_le)
+ UNWIND( .fnend)
+ENDPROC(_find_next_\name\()bit_\endian)
+ .endm
-#ifdef __ARMEB__
+ .macro find_bit, endian, set, name
+ find_first \endian, \set, \name
+ find_next \endian, \set, \name
+ .endm
-ENTRY(_find_first_zero_bit_be)
- teq r1, #0
- beq 3f
- mov r2, #0
-1: eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eors r3, r3, #0xff @ invert bits
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
-2: cmp r2, r1 @ any more?
- blo 1b
-3: mov r0, r1 @ no free bits
- ret lr
-ENDPROC(_find_first_zero_bit_be)
+/* _find_first_zero_bit_le and _find_next_zero_bit_le */
+ find_bit le, 0, zero_
-ENTRY(_find_next_zero_bit_be)
- teq r1, #0
- beq 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eor r3, r3, #0xff @ now looking for a 1 bit
- movs r3, r3, lsr ip @ shift off unused bits
- bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
- add r2, r2, #1 @ align bit pointer
- b 2b @ loop for next bit
-ENDPROC(_find_next_zero_bit_be)
+/* _find_first_bit_le and _find_next_bit_le */
+ find_bit le, 1
-ENTRY(_find_first_bit_be)
- teq r1, #0
- beq 3f
- mov r2, #0
-1: eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- movs r3, r3
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
-2: cmp r2, r1 @ any more?
- blo 1b
-3: mov r0, r1 @ no free bits
- ret lr
-ENDPROC(_find_first_bit_be)
+#ifdef __ARMEB__
-ENTRY(_find_next_bit_be)
- teq r1, #0
- beq 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- movs r3, r3, lsr ip @ shift off unused bits
- bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
- add r2, r2, #1 @ align bit pointer
- b 2b @ loop for next bit
-ENDPROC(_find_next_bit_be)
+/* _find_first_zero_bit_be and _find_next_zero_bit_be */
+ find_bit be, 0, zero_
+
+/* _find_first_bit_be and _find_next_bit_be */
+ find_bit be, 1
#endif
/*
* One or more bits in the LSB of r3 are assumed to be set.
*/
+.L_found_swab:
+ UNWIND( .fnstart)
+ rev_l r3, ip
.L_found:
-#if __LINUX_ARM_ARCH__ >= 5
+#if __LINUX_ARM_ARCH__ >= 7
+ rbit r3, r3 @ reverse bits
+ clz r3, r3 @ count high zero bits
+ add r0, r2, r3 @ add offset of first set bit
+#elif __LINUX_ARM_ARCH__ >= 5
rsb r0, r3, #0
- and r3, r3, r0
- clz r3, r3
- rsb r3, r3, #31
- add r0, r2, r3
+ and r3, r3, r0 @ mask out lowest bit set
+ clz r3, r3 @ count high zero bits
+ rsb r3, r3, #31 @ offset of first set bit
+ add r0, r2, r3 @ add offset of first set bit
#else
- tst r3, #0x0f
+ mov ip, #~0
+ tst r3, ip, lsr #16 @ test bits 0-15
+ addeq r2, r2, #16
+ moveq r3, r3, lsr #16
+ tst r3, #0x00ff
+ addeq r2, r2, #8
+ moveq r3, r3, lsr #8
+ tst r3, #0x000f
addeq r2, r2, #4
- movne r3, r3, lsl #4
- tst r3, #0x30
+ moveq r3, r3, lsr #4
+ tst r3, #0x0003
addeq r2, r2, #2
- movne r3, r3, lsl #2
- tst r3, #0x40
+ moveq r3, r3, lsr #2
+ tst r3, #0x0001
addeq r2, r2, #1
mov r0, r2
#endif
cmp r1, r0 @ Clamp to maxbit
movlo r0, r1
ret lr
-
+ UNWIND( .fnend)
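
The rewritten findbit.S scans a 32-bit word per iteration instead of a byte, and the .L_found tail converts the lowest set bit of r3 into a bit index. A C sketch of the equivalent word-at-a-time search (the function name is made up; __builtin_ctzl plays the role of the rbit/clz sequence):

    static unsigned int find_first_bit_sketch(const unsigned long *addr,
                                              unsigned int maxbit)
    {
            unsigned int base;

            for (base = 0; base < maxbit; base += 32) {
                    unsigned long w = *addr++;      /* one 32-bit word per step */

                    if (w) {
                            unsigned int bit = base + __builtin_ctzl(w);

                            return bit < maxbit ? bit : maxbit; /* clamp, like "cmp r1, r0" */
                    }
            }
            return maxbit;          /* no bit found: return maxbit, like label 3: */
    }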
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 09a333153dc6..90f2b645aa0d 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -42,26 +42,25 @@
strb\cond \reg, [\ptr], #1
.endm
- .macro enter reg1 reg2
- stmdb sp!, {r0, \reg1, \reg2}
+ .macro enter regs:vararg
+UNWIND( .save {r0, \regs} )
+ stmdb sp!, {r0, \regs}
.endm
- .macro usave reg1 reg2
- UNWIND( .save {r0, \reg1, \reg2} )
- .endm
-
- .macro exit reg1 reg2
- ldmfd sp!, {r0, \reg1, \reg2}
+ .macro exit regs:vararg
+ ldmfd sp!, {r0, \regs}
.endm
.text
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+ENTRY(__memcpy)
ENTRY(mmiocpy)
-ENTRY(memcpy)
+WEAK(memcpy)
#include "copy_template.S"
ENDPROC(memcpy)
ENDPROC(mmiocpy)
+ENDPROC(__memcpy)
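
memcpy is now a WEAK symbol with a strong __memcpy alias, so an instrumented build (KASAN, for instance) can interpose its own memcpy while low-level code keeps a direct, uninstrumented entry point. A hedged C sketch of the same weak/strong pairing with illustrative names:

    void *__real_copy(void *dst, const void *src, unsigned long n)
    {
            char *d = dst;
            const char *s = src;

            while (n--)
                    *d++ = *s++;
            return dst;
    }

    /* weak alias: replaced if a strong "copy" is linked in elsewhere */
    void *copy(void *dst, const void *src, unsigned long n)
            __attribute__((weak, alias("__real_copy")));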
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index b50e5770fb44..6410554039fd 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -24,18 +24,20 @@
* occurring in the opposite direction.
*/
-ENTRY(memmove)
+ENTRY(__memmove)
+WEAK(memmove)
UNWIND( .fnstart )
subs ip, r0, r1
cmphi r2, ip
- bls memcpy
-
- stmfd sp!, {r0, r4, lr}
+ bls __memcpy
UNWIND( .fnend )
UNWIND( .fnstart )
- UNWIND( .save {r0, r4, lr} ) @ in first stmfd block
+ UNWIND( .save {r0, r4, fpreg, lr} )
+ stmfd sp!, {r0, r4, UNWIND(fpreg,) lr}
+ UNWIND( .setfp fpreg, sp )
+ UNWIND( mov fpreg, sp )
add r1, r1, r2
add r0, r0, r2
subs r2, r2, #4
@@ -47,12 +49,7 @@ ENTRY(memmove)
bne 10f
1: subs r2, r2, #(28)
- stmfd sp!, {r5 - r8}
- UNWIND( .fnend )
-
- UNWIND( .fnstart )
- UNWIND( .save {r0, r4, lr} )
- UNWIND( .save {r5 - r8} ) @ in second stmfd block
+ stmfd sp!, {r5, r6, r8, r9}
blt 5f
CALGN( ands ip, r0, #31 )
@@ -71,9 +68,9 @@ ENTRY(memmove)
PLD( pld [r1, #-96] )
3: PLD( pld [r1, #-128] )
-4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+4: ldmdb r1!, {r3, r4, r5, r6, r8, r9, ip, lr}
subs r2, r2, #32
- stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ stmdb r0!, {r3, r4, r5, r6, r8, r9, ip, lr}
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
@@ -87,8 +84,8 @@ ENTRY(memmove)
W(ldr) r4, [r1, #-4]!
W(ldr) r5, [r1, #-4]!
W(ldr) r6, [r1, #-4]!
- W(ldr) r7, [r1, #-4]!
W(ldr) r8, [r1, #-4]!
+ W(ldr) r9, [r1, #-4]!
W(ldr) lr, [r1, #-4]!
add pc, pc, ip
@@ -98,17 +95,13 @@ ENTRY(memmove)
W(str) r4, [r0, #-4]!
W(str) r5, [r0, #-4]!
W(str) r6, [r0, #-4]!
- W(str) r7, [r0, #-4]!
W(str) r8, [r0, #-4]!
+ W(str) r9, [r0, #-4]!
W(str) lr, [r0, #-4]!
CALGN( bcs 2b )
-7: ldmfd sp!, {r5 - r8}
- UNWIND( .fnend ) @ end of second stmfd block
-
- UNWIND( .fnstart )
- UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
+7: ldmfd sp!, {r5, r6, r8, r9}
8: movs r2, r2, lsl #31
ldrbne r3, [r1, #-1]!
@@ -117,7 +110,7 @@ ENTRY(memmove)
strbne r3, [r0, #-1]!
strbcs r4, [r0, #-1]!
strbcs ip, [r0, #-1]
- ldmfd sp!, {r0, r4, pc}
+ ldmfd sp!, {r0, r4, UNWIND(fpreg,) pc}
9: cmp ip, #2
ldrbgt r3, [r1, #-1]!
@@ -136,13 +129,10 @@ ENTRY(memmove)
ldr r3, [r1, #0]
beq 17f
blt 18f
- UNWIND( .fnend )
.macro backward_copy_shift push pull
- UNWIND( .fnstart )
- UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
subs r2, r2, #28
blt 14f
@@ -151,12 +141,7 @@ ENTRY(memmove)
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
-11: stmfd sp!, {r5 - r9}
- UNWIND( .fnend )
-
- UNWIND( .fnstart )
- UNWIND( .save {r0, r4, lr} )
- UNWIND( .save {r5 - r9} ) @ in new second stmfd block
+11: stmfd sp!, {r5, r6, r8 - r10}
PLD( pld [r1, #-4] )
PLD( subs r2, r2, #96 )
@@ -166,35 +151,31 @@ ENTRY(memmove)
PLD( pld [r1, #-96] )
12: PLD( pld [r1, #-128] )
-13: ldmdb r1!, {r7, r8, r9, ip}
+13: ldmdb r1!, {r8, r9, r10, ip}
mov lr, r3, lspush #\push
subs r2, r2, #32
ldmdb r1!, {r3, r4, r5, r6}
orr lr, lr, ip, lspull #\pull
mov ip, ip, lspush #\push
- orr ip, ip, r9, lspull #\pull
+ orr ip, ip, r10, lspull #\pull
+ mov r10, r10, lspush #\push
+ orr r10, r10, r9, lspull #\pull
mov r9, r9, lspush #\push
orr r9, r9, r8, lspull #\pull
mov r8, r8, lspush #\push
- orr r8, r8, r7, lspull #\pull
- mov r7, r7, lspush #\push
- orr r7, r7, r6, lspull #\pull
+ orr r8, r8, r6, lspull #\pull
mov r6, r6, lspush #\push
orr r6, r6, r5, lspull #\pull
mov r5, r5, lspush #\push
orr r5, r5, r4, lspull #\pull
mov r4, r4, lspush #\push
orr r4, r4, r3, lspull #\pull
- stmdb r0!, {r4 - r9, ip, lr}
+ stmdb r0!, {r4 - r6, r8 - r10, ip, lr}
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
- ldmfd sp!, {r5 - r9}
- UNWIND( .fnend ) @ end of the second stmfd block
-
- UNWIND( .fnstart )
- UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
+ ldmfd sp!, {r5, r6, r8 - r10}
14: ands ip, r2, #28
beq 16f
@@ -210,7 +191,6 @@ ENTRY(memmove)
16: add r1, r1, #(\pull / 8)
b 8b
- UNWIND( .fnend )
.endm
@@ -221,4 +201,6 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
+ UNWIND( .fnend )
ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 6ca4535c47fb..de75ae4d5ab4 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -13,9 +13,11 @@
.text
.align 5
+ENTRY(__memset)
ENTRY(mmioset)
-ENTRY(memset)
+WEAK(memset)
UNWIND( .fnstart )
+ and r1, r1, #255 @ cast to unsigned char
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
@@ -27,16 +29,16 @@ UNWIND( .fnstart )
mov r3, r1
7: cmp r2, #16
blt 4f
+UNWIND( .fnend )
#if ! CALGN(1)+0
/*
* We need 2 extra registers for this loop - use r8 and the LR
*/
- stmfd sp!, {r8, lr}
-UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
+ stmfd sp!, {r8, lr}
mov r8, r1
mov lr, r3
@@ -65,10 +67,9 @@ UNWIND( .fnend )
* whole cache lines at once.
*/
- stmfd sp!, {r4-r8, lr}
-UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
+ stmfd sp!, {r4-r8, lr}
mov r4, r1
mov r5, r3
mov r6, r1
@@ -132,6 +133,7 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
+ENDPROC(__memset)
ENTRY(__memset32)
UNWIND( .fnstart )
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 4ebecc67e6e0..f13fe9bc2399 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -10,3 +10,7 @@
.text
testop _test_and_change_bit, eor, str
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop _sync_test_and_change_bit, eor, str
+#endif
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 009afa0f5b4a..4d2c5ca620eb 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -10,3 +10,7 @@
.text
testop _test_and_clear_bit, bicne, strne
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop _sync_test_and_clear_bit, bicne, strne
+#endif
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index f3192e55acc8..649dbab65d8d 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -10,3 +10,7 @@
.text
testop _test_and_set_bit, orreq, streq
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop _sync_test_and_set_bit, orreq, streq
+#endif
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index c9450982a155..2f6163f05e93 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -24,6 +24,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
unsigned long addr = (unsigned long)_addr;
pgd_t *pgd;
+ p4d_t *p4d;
pmd_t *pmd;
pte_t *pte;
pud_t *pud;
@@ -33,7 +34,11 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
return 0;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
+ return 0;
+
+ pud = pud_offset(p4d, addr);
if (unlikely(pud_none(*pud) || pud_bad(*pud)))
return 0;
@@ -69,6 +74,9 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
return 0;
pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
+ if (unlikely(!pte))
+ return 0;
+
if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
!pte_write(*pte) || !pte_dirty(*pte))) {
pte_unmap_unlock(pte, ptl);
@@ -87,16 +95,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
unsigned long ua_flags;
int atomic;
- if (uaccess_kernel()) {
- memcpy((void *)to, from, n);
- return 0;
- }
-
/* the mmap semaphore is taken only if not in an atomic context */
atomic = faulthandler_disabled();
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
@@ -104,11 +107,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
while (!pin_page_for_write(to, &pte, &ptl)) {
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)to))
goto out;
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -116,7 +119,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
- memcpy((void *)to, from, tocopy);
+ __memcpy((void *)to, from, tocopy);
uaccess_restore(ua_flags);
to += tocopy;
from += tocopy;
@@ -128,7 +131,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
spin_unlock(ptl);
}
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return n;
@@ -160,22 +163,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
{
unsigned long ua_flags;
- if (uaccess_kernel()) {
- memset((void *)addr, 0, n);
- return 0;
- }
-
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(addr, &pte, &ptl)) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)addr))
goto out;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -183,7 +181,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
- memset((void *)addr, 0, tocopy);
+ __memset((void *)addr, 0, tocopy);
uaccess_restore(ua_flags);
addr += tocopy;
n -= tocopy;
@@ -193,7 +191,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
else
spin_unlock(ptl);
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return n;
@@ -242,7 +240,7 @@ static int __init test_size_treshold(void)
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
- user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+ user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
if (!user_ptr)
goto no_vmap;
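
With the p4d level added, pin_page_for_write() walks all five software page-table levels before taking the PTE lock. A hedged sketch of that walk (the helper name is made up, and the per-level *_none()/*_bad() checks are omitted for brevity):

    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
                              spinlock_t **ptlp)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d = p4d_offset(pgd, addr);
            pud_t *pud = pud_offset(p4d, addr);
            pmd_t *pmd = pmd_offset(pud, addr);

            /* may now return NULL, which the caller treats as "pin failed" */
            return pte_offset_map_lock(mm, pmd, addr, ptlp);
    }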
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index b99dd8e1c93f..522510baed49 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -17,17 +17,11 @@ MODULE_LICENSE("GPL");
/*
* Pull in the reference implementations while instructing GCC (through
* -ftree-vectorize) to attempt to exploit implicit parallelism and emit
- * NEON instructions.
+ * NEON instructions. Clang does this by default at O2 so no pragma is
+ * needed.
*/
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#ifdef CONFIG_CC_IS_GCC
#pragma GCC optimize "tree-vectorize"
-#else
-/*
- * While older versions of GCC do not generate incorrect code, they fail to
- * recognize the parallel nature of these functions, and emit plain ARM code,
- * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
- */
-#warning This code requires at least version 4.6 of GCC
#endif
#pragma GCC diagnostic ignored "-Wunused-variable"
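
The file relies on compiler auto-vectorization: a plain scalar loop plus the tree-vectorize pragma for GCC (Clang already vectorizes at -O2). A minimal sketch of the pattern, not the in-tree implementation; built with NEON_FLAGS it should compile down to NEON loads, veor and stores:

    #ifdef CONFIG_CC_IS_GCC
    #pragma GCC optimize "tree-vectorize"
    #endif

    static void xor_words_sketch(unsigned long bytes,
                                 unsigned long *p1, const unsigned long *p2)
    {
            unsigned long n = bytes / sizeof(unsigned long);

            while (n--)             /* candidate loop for auto-vectorization */
                    *p1++ ^= *p2++;
    }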