Diffstat (limited to 'arch/arm/lib')
 arch/arm/lib/.gitignore            |  4 ++++
 arch/arm/lib/Makefile              |  3 +--
 arch/arm/lib/bitops.h              | 14 +++++++++++---
 arch/arm/lib/csumpartialcopyuser.S | 20 +++++++++++++++++++-
 arch/arm/lib/delay-loop.S          | 16 ++++++++++------
 arch/arm/lib/memset.S              |  1 +
 arch/arm/lib/testchangebit.S       |  4 ++++
 arch/arm/lib/testclearbit.S        |  4 ++++
 arch/arm/lib/testsetbit.S          |  4 ++++
 arch/arm/lib/uaccess_with_memcpy.c | 11 +++++++----
 arch/arm/lib/xor-neon.c            |  1 +
 11 files changed, 66 insertions(+), 16 deletions(-)
diff --git a/arch/arm/lib/.gitignore b/arch/arm/lib/.gitignore
new file mode 100644
index 000000000000..647d7a922e68
--- /dev/null
+++ b/arch/arm/lib/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# This now-removed directory used to contain generated files.
+/crypto/
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 650404be6768..0ca5aae1bcc3 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -40,8 +40,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S

ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
- NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
- CFLAGS_xor-neon.o += $(NEON_FLAGS)
+ CFLAGS_xor-neon.o += $(CC_FLAGS_FPU)
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
endif
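
[note] Dropping the hardcoded -march/-mfpu flags in favour of $(CC_FLAGS_FPU)
lets kbuild define the FPU flags in one central place. How the object is
compiled is only half the story: kernel-mode NEON code must also be bracketed
at runtime. A minimal C sketch of that convention (kernel_neon_begin(),
kernel_neon_end() and may_use_simd() are the real ARM kernel APIs;
xor_neon_body() is a hypothetical worker compiled with these flags):

#include <linux/types.h>
#include <asm/neon.h>   /* kernel_neon_begin()/kernel_neon_end() */
#include <asm/simd.h>   /* may_use_simd() */

/* Run a NEON-compiled helper only where kernel-mode NEON is usable,
 * saving and restoring the VFP/NEON register state around it. */
static bool xor_neon_guarded(unsigned long bytes, unsigned long *p1,
                             const unsigned long *p2)
{
        if (!may_use_simd())
                return false;           /* caller falls back to scalar XOR */

        kernel_neon_begin();
        xor_neon_body(bytes, p1, p2);   /* hypothetical NEON worker */
        kernel_neon_end();
        return true;
}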
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 95bd35991288..f069d1b2318e 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -28,7 +28,7 @@ UNWIND( .fnend )
ENDPROC(\name )
.endm

- .macro testop, name, instr, store
+ .macro __testop, name, instr, store, barrier
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND( .fnstart )
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask
- smp_dmb
+ \barrier
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
.arch_extension mp
ALT_SMP(W(pldw) [r1])
@@ -50,13 +50,21 @@ UNWIND( .fnstart )
strex ip, r2, [r1]
cmp ip, #0
bne 1b
- smp_dmb
+ \barrier
cmp r0, #0
movne r0, #1
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
.endm
+
+ .macro testop, name, instr, store
+ __testop \name, \instr, \store, smp_dmb
+ .endm
+
+ .macro sync_testop, name, instr, store
+ __testop \name, \instr, \store, __smp_dmb
+ .endm
#else
.macro bitop, name, instr
ENTRY( \name )
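
[note] The refactor threads the barrier in as a macro argument: testop keeps
the SMP-conditional smp_dmb, while the new sync_testop expands the same body
with an unconditional __smp_dmb, for callers that must synchronize with an
observer outside the SMP coherence domain. A rough C analogue of the
ldrex/strex loop that __testop generates (a sketch only, shown for the
test-and-set case; names are illustrative):

/* C model of __testop: the two "barrier" comments mark where \barrier
 * (smp_dmb or __smp_dmb) is expanded. */
static inline int test_and_set_bit_sketch(unsigned int nr, unsigned long *p)
{
        unsigned long mask = 1UL << (nr & 31);
        unsigned long *word = p + (nr >> 5);    /* word offset, as in the asm */
        unsigned long old, val;

        /* barrier */
        do {
                old = __atomic_load_n(word, __ATOMIC_RELAXED);   /* ldrex  */
                val = old | mask;                                /* \instr */
        } while (!__atomic_compare_exchange_n(word, &old, val, 0,
                        __ATOMIC_RELAXED, __ATOMIC_RELAXED));    /* strex, bne 1b */
        /* barrier */
        return (old & mask) != 0;
}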
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6928781e6bee..c289bde04743 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -13,7 +13,8 @@

.text

-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
.macro save_regs
mrc p15, 0, ip, c3, c0, 0
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
@@ -25,7 +26,23 @@
mcr p15, 0, ip, c3, c0, 0
ret lr
.endm
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro save_regs
+ mrc p15, 0, ip, c2, c0, 2 @ read TTBCR
+ stmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ uaccess_enable ip
+ .endm
+
+ .macro load_regs
+ ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ mcr p15, 0, ip, c2, c0, 2 @ restore TTBCR
+ ret lr
+ .endm
+
#else
+
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
@@ -33,6 +50,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
+
#endif

.macro load1b, reg1
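
[note] The new CONFIG_CPU_TTBR0_PAN variant mirrors the SW-domain one: stash
the register that controls PAN (TTBCR here, DACR in the SW-domain case) along
with the callee-saved registers, open user access with uaccess_enable, and
write the saved value back on return. The same bracketing exists at the C
level (and is used by uaccess_with_memcpy.c further down); a minimal sketch,
with the checksumming copy reduced to a placeholder:

/* C-level equivalent of the save_regs/load_regs bracketing */
static unsigned long copy_with_uaccess_open(void __user *to, const void *from,
                                            unsigned long n)
{
        unsigned int ua_flags;

        ua_flags = uaccess_save_and_enable();  /* save DACR/TTBCR, open access */
        __memcpy((void __force *)to, from, n); /* placeholder for the real copy */
        uaccess_restore(ua_flags);             /* restore, re-arming PAN */
        return 0;
}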
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 3ac05177d097..33b08ca1c242 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -5,6 +5,7 @@
* Copyright (C) 1995, 1996 Russell King
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/delay.h>
@@ -24,21 +25,26 @@
* HZ <= 1000
*/

-ENTRY(__loop_udelay)
+SYM_TYPED_FUNC_START(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0 @ r0 = delay_us * UDELAY_MULT
-ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
+ b __loop_const_udelay
+SYM_FUNC_END(__loop_udelay)
+
+SYM_TYPED_FUNC_START(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
ldr r2, .LC0
ldr r2, [r2]
umull r1, r0, r2, r0 @ r0-r1 = r0 * loops_per_jiffy
adds r1, r1, #0xffffffff @ rounding up ...
adcs r0, r0, r0 @ and right shift by 31
reteq lr
+ b __loop_delay
+SYM_FUNC_END(__loop_const_udelay)

.align 3

@ Delay routine
-ENTRY(__loop_delay)
+SYM_TYPED_FUNC_START(__loop_delay)
subs r0, r0, #1
#if 0
retls lr
@@ -58,6 +64,4 @@ ENTRY(__loop_delay)
#endif
bhi __loop_delay
ret lr
-ENDPROC(__loop_udelay)
-ENDPROC(__loop_const_udelay)
-ENDPROC(__loop_delay)
+SYM_FUNC_END(__loop_delay)
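
[note] SYM_TYPED_FUNC_START (from the newly included <linux/cfi_types.h>)
emits a CFI type hash in front of each routine, which CONFIG_CFI_CLANG checks
on every indirect call. A typed symbol can no longer be reached by silently
falling through from the previous ENTRY, so the old fallthroughs become
explicit b instructions. The indirect call site these types must match is
arm_delay_ops; layout as in arch/arm/include/asm/delay.h, quoted from memory:

/* kCFI verifies indirect calls like this one against the type
 * hashes that SYM_TYPED_FUNC_START emitted for the asm routines. */
struct arm_delay_ops {
        void (*delay)(unsigned long);
        void (*const_udelay)(unsigned long);
        void (*udelay)(unsigned long);
        unsigned long ticks_per_jiffy;
};
extern struct arm_delay_ops arm_delay_ops;

static inline void udelay_sketch(unsigned long usecs)
{
        arm_delay_ops.udelay(usecs);    /* CFI-checked indirect call */
}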
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index d71ab61430b2..de75ae4d5ab4 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -17,6 +17,7 @@ ENTRY(__memset)
ENTRY(mmioset)
WEAK(memset)
UNWIND( .fnstart )
+ and r1, r1, #255 @ cast to unsigned char
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
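
[note] memset() takes its fill value as an int but must behave as if the
value were converted to unsigned char (C11 7.24.6.1). The added
and r1, r1, #255 performs exactly that truncation; without it, set high bits
in the int argument would leak into the word-wide replication the routine
does later (orr r1, r1, r1, lsl #8 and so on). The C model it must match:

#include <stddef.h>

/* Reference semantics: only the low byte of c participates. */
static void *memset_model(void *s, int c, size_t n)
{
        unsigned char *p = s;
        unsigned char byte = (unsigned char)c;  /* the 'and r1, r1, #255' */

        while (n--)
                *p++ = byte;
        return s;
}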
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 4ebecc67e6e0..f13fe9bc2399 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -10,3 +10,7 @@
.text

testop _test_and_change_bit, eor, str
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop _sync_test_and_change_bit, eor, str
+#endif
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 009afa0f5b4a..4d2c5ca620eb 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -10,3 +10,7 @@
.text

testop _test_and_clear_bit, bicne, strne
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop _sync_test_and_clear_bit, bicne, strne
+#endif
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index f3192e55acc8..649dbab65d8d 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -10,3 +10,7 @@
.text

testop _test_and_set_bit, orreq, streq
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop _sync_test_and_set_bit, orreq, streq
+#endif
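
[note] Each of the three test-and-modify files gains a _sync_ variant built
from the sync_testop macro introduced in bitops.h above. The sync_ bitops
issue a full dmb on both sides even on !CONFIG_SMP kernels, since they order
accesses against an observer outside the kernel's coherence domain (for
example a hypervisor or coprocessor); the ARMv6+ guard matches the
ldrex/strex-based branch of bitops.h that sync_testop lives in. A hedged
usage sketch (bitmap and slot are illustrative names):

#include <asm/sync_bitops.h>

/* Claim a slot in memory shared with a non-CPU agent; full barriers
 * are emitted even on a uniprocessor kernel. */
static int claim_shared_slot(volatile unsigned long *bitmap, int slot)
{
        return sync_test_and_set_bit(slot, bitmap);  /* returns the old bit */
}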
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 14eecaaf295f..c0ac7796d775 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -56,10 +56,10 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
* to see that it's still huge and whether or not we will
* need to fault on write.
*/
- if (unlikely(pmd_thp_or_huge(*pmd))) {
+ if (unlikely(pmd_leaf(*pmd))) {
ptl = &current->mm->page_table_lock;
spin_lock(ptl);
- if (unlikely(!pmd_thp_or_huge(*pmd)
+ if (unlikely(!pmd_leaf(*pmd)
|| pmd_hugewillfault(*pmd))) {
spin_unlock(ptl);
return 0;
@@ -74,6 +74,9 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
return 0;

pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
+ if (unlikely(!pte))
+ return 0;
+
if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
!pte_write(*pte) || !pte_dirty(*pte))) {
pte_unmap_unlock(pte, ptl);
@@ -116,7 +119,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
tocopy = n;

ua_flags = uaccess_save_and_enable();
- memcpy((void *)to, from, tocopy);
+ __memcpy((void *)to, from, tocopy);
uaccess_restore(ua_flags);
to += tocopy;
from += tocopy;
@@ -178,7 +181,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
tocopy = n;

ua_flags = uaccess_save_and_enable();
- memset((void *)addr, 0, tocopy);
+ __memset((void *)addr, 0, tocopy);
uaccess_restore(ua_flags);
addr += tocopy;
n -= tocopy;
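
[note] Three independent fixes meet in this file: the ARM-private
pmd_thp_or_huge() is replaced by the generic pmd_leaf(); pte_offset_map_lock()
may now fail (the page table can be freed or collapsed underneath us), so its
result has to be checked before use; and the copies performed while user
access is open switch to the raw __memcpy()/__memset(), keeping instrumented
wrappers out of the uaccess window. The new NULL-check idiom, spelled out as
a sketch (argument names follow the surrounding code):

/* Returning 0 here makes the caller fall back to the ordinary
 * byte-wise user copy instead of the pinned-page fast path. */
static int map_pte_checked(struct mm_struct *mm, pmd_t *pmd,
                           unsigned long addr, pte_t **ptep, spinlock_t **ptlp)
{
        spinlock_t *ptl;
        pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

        if (unlikely(!pte))     /* table vanished under us */
                return 0;

        *ptep = pte;
        *ptlp = ptl;
        return 1;
}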
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index 522510baed49..cf57fca97908 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -8,6 +8,7 @@
#include <linux/raid/xor.h>
#include <linux/module.h>

+MODULE_DESCRIPTION("NEON accelerated XOR implementation");
MODULE_LICENSE("GPL");

#ifndef __ARM_NEON__