Diffstat (limited to 'arch/arm64/lib')
-rw-r--r--  arch/arm64/lib/.gitignore            |   4
-rw-r--r--  arch/arm64/lib/Makefile              |   8
-rw-r--r--  arch/arm64/lib/clear_page.S          |  13
-rw-r--r--  arch/arm64/lib/clear_user.S          |  25
-rw-r--r--  arch/arm64/lib/copy_from_user.S      |  10
-rw-r--r--  arch/arm64/lib/copy_page.S           |  20
-rw-r--r--  arch/arm64/lib/copy_template.S       |  10
-rw-r--r--  arch/arm64/lib/copy_to_user.S        |  10
-rw-r--r--  arch/arm64/lib/crc32.S               | 160
-rw-r--r--  arch/arm64/lib/csum.c                |   2
-rw-r--r--  arch/arm64/lib/delay.c               |   2
-rw-r--r--  arch/arm64/lib/insn.c                | 106
-rw-r--r--  arch/arm64/lib/memcpy.S              |  19
-rw-r--r--  arch/arm64/lib/memset.S              |  20
-rw-r--r--  arch/arm64/lib/uaccess_flushcache.c  |   6
-rw-r--r--  arch/arm64/lib/xor-neon.c            |  10
16 files changed, 208 insertions(+), 217 deletions(-)
diff --git a/arch/arm64/lib/.gitignore b/arch/arm64/lib/.gitignore
new file mode 100644
index 000000000000..647d7a922e68
--- /dev/null
+++ b/arch/arm64/lib/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# This now-removed directory used to contain generated files.
+/crypto/
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 29490be2546b..633e5223d944 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -7,16 +7,12 @@ lib-y := clear_user.o delay.o copy_from_user.o \
ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
-CFLAGS_REMOVE_xor-neon.o += -mgeneral-regs-only
-CFLAGS_xor-neon.o += -ffreestanding
-# Enable <arm_neon.h>
-CFLAGS_xor-neon.o += -isystem $(shell $(CC) -print-file-name=include)
+CFLAGS_xor-neon.o += $(CC_FLAGS_FPU)
+CFLAGS_REMOVE_xor-neon.o += $(CC_FLAGS_NO_FPU)
endif
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
-obj-$(CONFIG_CRC32) += crc32.o
-
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_ARM64_MTE) += mte.o
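
The hunk above swaps the open-coded NEON enablement (removing -mgeneral-regs-only, adding -ffreestanding and a compiler include path) for the generic CC_FLAGS_FPU / CC_FLAGS_NO_FPU variables shared with other architectures. What those per-object flags permit is ordinary <arm_neon.h> intrinsics in kernel code; a minimal sketch, assuming the usual kernel_neon_begin()/kernel_neon_end() bracketing by the caller (xor_16_bytes is an illustrative name, not part of the patch):

    #include <arm_neon.h>

    /* XOR 16 bytes of p2 into p1 through a NEON register; building this
     * requires the FPU code-generation flags added above. */
    static void xor_16_bytes(unsigned long *p1, const unsigned long *p2)
    {
            uint64x2_t a = vld1q_u64((const uint64_t *)p1);
            uint64x2_t b = vld1q_u64((const uint64_t *)p2);

            vst1q_u64((uint64_t *)p1, veorq_u64(a, b));
    }
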
diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
index ebde40e7fa2b..bd6f7d5eb6eb 100644
--- a/arch/arm64/lib/clear_page.S
+++ b/arch/arm64/lib/clear_page.S
@@ -15,6 +15,19 @@
* x0 - dest
*/
SYM_FUNC_START(__pi_clear_page)
+#ifdef CONFIG_AS_HAS_MOPS
+ .arch_extension mops
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
+alternative_else_nop_endif
+
+ mov x1, #PAGE_SIZE
+ setpn [x0]!, x1!, xzr
+ setmn [x0]!, x1!, xzr
+ seten [x0]!, x1!, xzr
+ ret
+.Lno_mops:
+#endif
mrs x1, dczid_el0
tbnz x1, #4, 2f /* Branch if DC ZVA is prohibited */
and w1, w1, #0xf
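
For readers new to FEAT_MOPS: SETP/SETM/SETE are a prologue/main/epilogue triple that together perform one memory set, with the implementation free to decide how much work each step does; the trailing "n" selects the non-temporal variants and XZR supplies the zero fill byte. A rough C rendering of the fast path added above, as a hedged sketch only (mops_clear_page and the hard-coded 4 KiB are illustrative; the real code is guarded by the ARM64_HAS_MOPS alternative and falls back to DC ZVA as shown):

    /* Zero one page with the non-temporal SET* triple. Requires an
     * assembler that accepts ".arch_extension mops". */
    static inline void mops_clear_page(void *page)
    {
            void *dst = page;
            unsigned long len = 4096;       /* PAGE_SIZE, assumed 4K */

            asm volatile(
            ".arch_extension mops\n"
            "       setpn   [%0]!, %1!, xzr\n"      /* prologue */
            "       setmn   [%0]!, %1!, xzr\n"      /* main */
            "       seten   [%0]!, %1!, xzr\n"      /* epilogue */
            : "+r" (dst), "+r" (len) : : "cc", "memory");
    }
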
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index a5a5f5b97b17..de9a303b6ad0 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,14 +17,27 @@
* Alignment fixed up by hardware.
*/
- .p2align 4
- // Alignment is for the loop, but since the prologue (including BTI)
- // is also 16 bytes we can keep any padding outside the function
SYM_FUNC_START(__arch_clear_user)
add x2, x0, x1
+
+#ifdef CONFIG_AS_HAS_MOPS
+ .arch_extension mops
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
+alternative_else_nop_endif
+
+USER(9f, setpt [x0]!, x1!, xzr)
+USER(6f, setmt [x0]!, x1!, xzr)
+USER(6f, setet [x0]!, x1!, xzr)
+ mov x0, #0
+ ret
+.Lno_mops:
+#endif
+
subs x1, x1, #8
b.mi 2f
-1:
+
+1: .p2align 4
USER(9f, sttr xzr, [x0])
add x0, x0, #8
subs x1, x1, #8
@@ -47,6 +60,10 @@ USER(7f, sttrb wzr, [x2, #-1])
ret
// Exception fixups
+6: b.cs 9f
+ // Registers are in Option A format
+ add x0, x0, x1
+ b 9f
7: sub x0, x2, #5 // Adjust for faulting on the final byte...
8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
9: sub x0, x2, x0
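
The new 6: fixup handles a fault taken from the SETMT/SETET steps. Per the comment and the Arm ARM's two permitted register formats: with PSTATE.C set the registers are in Option B format and x0 already points at the failing byte, while with PSTATE.C clear (Option A) x1 holds a negative count of bytes still to clear, so x0 + x1 recovers the current position; label 9: then returns end - x0, the number of bytes left uncleared. A hedged C model of that arithmetic (all names illustrative):

    #include <stdbool.h>

    /* Model of the fixup: return bytes NOT cleared, which is the
     * __arch_clear_user() contract. "end" corresponds to x2 = dst + size. */
    static unsigned long clear_user_fixup(unsigned long x0, long x1,
                                          unsigned long end, bool option_a)
    {
            if (option_a)
                    x0 += x1;       /* x1 is negative in Option A */
            return end - x0;        /* bytes left untouched */
    }
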
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..400057d607ec 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -52,6 +52,13 @@
stp \reg1, \reg2, [\ptr], \val
.endm
+ .macro cpy1 dst, src, count
+ .arch_extension mops
+ USER_CPY(9997f, 0, cpyfprt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 0, cpyfmrt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 0, cpyfert [\dst]!, [\src]!, \count!)
+ .endm
+
end .req x5
srcin .req x15
SYM_FUNC_START(__arch_copy_from_user)
@@ -62,6 +69,9 @@ SYM_FUNC_START(__arch_copy_from_user)
ret
// Exception fixups
+9996: b.cs 9997f
+ // Registers are in Option A format
+ add dst, dst, count
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index c336d2ffdec5..e6374e7e5511 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -18,13 +18,19 @@
* x1 - src
*/
SYM_FUNC_START(__pi_copy_page)
-alternative_if ARM64_HAS_NO_HW_PREFETCH
- // Prefetch three cache lines ahead.
- prfm pldl1strm, [x1, #128]
- prfm pldl1strm, [x1, #256]
- prfm pldl1strm, [x1, #384]
+#ifdef CONFIG_AS_HAS_MOPS
+ .arch_extension mops
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
alternative_else_nop_endif
+ mov x2, #PAGE_SIZE
+ cpypwn [x0]!, [x1]!, x2!
+ cpymwn [x0]!, [x1]!, x2!
+ cpyewn [x0]!, [x1]!, x2!
+ ret
+.Lno_mops:
+#endif
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
ldp x6, x7, [x1, #32]
@@ -39,10 +45,6 @@ alternative_else_nop_endif
1:
tst x0, #(PAGE_SIZE - 1)
-alternative_if ARM64_HAS_NO_HW_PREFETCH
- prfm pldl1strm, [x1, #384]
-alternative_else_nop_endif
-
stnp x2, x3, [x0, #-256]
ldp x2, x3, [x1]
stnp x4, x5, [x0, #16 - 256]
diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
index 488df234c49a..7f2f5a0e2fb9 100644
--- a/arch/arm64/lib/copy_template.S
+++ b/arch/arm64/lib/copy_template.S
@@ -40,6 +40,16 @@ D_l .req x13
D_h .req x14
mov dst, dstin
+
+#ifdef CONFIG_AS_HAS_MOPS
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
+alternative_else_nop_endif
+ cpy1 dst, src, count
+ b .Lexitfunc
+.Lno_mops:
+#endif
+
cmp count, #16
/*When memory length is less than 16, the accessed are not aligned.*/
b.lo .Ltiny15
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 802231772608..819f2e3fc7a9 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -51,6 +51,13 @@
user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm
+ .macro cpy1 dst, src, count
+ .arch_extension mops
+ USER_CPY(9997f, 1, cpyfpwt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 1, cpyfmwt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 1, cpyfewt [\dst]!, [\src]!, \count!)
+ .endm
+
end .req x5
srcin .req x15
SYM_FUNC_START(__arch_copy_to_user)
@@ -61,6 +68,9 @@ SYM_FUNC_START(__arch_copy_to_user)
ret
// Exception fixups
+9996: b.cs 9997f
+ // Registers are in Option A format
+ add dst, dst, count
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S
deleted file mode 100644
index 8340dccff46f..000000000000
--- a/arch/arm64/lib/crc32.S
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Accelerated CRC32(C) using AArch64 CRC instructions
- *
- * Copyright (C) 2016 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
-
- .arch armv8-a+crc
-
- .macro byteorder, reg, be
- .if \be
-CPU_LE( rev \reg, \reg )
- .else
-CPU_BE( rev \reg, \reg )
- .endif
- .endm
-
- .macro byteorder16, reg, be
- .if \be
-CPU_LE( rev16 \reg, \reg )
- .else
-CPU_BE( rev16 \reg, \reg )
- .endif
- .endm
-
- .macro bitorder, reg, be
- .if \be
- rbit \reg, \reg
- .endif
- .endm
-
- .macro bitorder16, reg, be
- .if \be
- rbit \reg, \reg
- lsr \reg, \reg, #16
- .endif
- .endm
-
- .macro bitorder8, reg, be
- .if \be
- rbit \reg, \reg
- lsr \reg, \reg, #24
- .endif
- .endm
-
- .macro __crc32, c, be=0
- bitorder w0, \be
- cmp x2, #16
- b.lt 8f // less than 16 bytes
-
- and x7, x2, #0x1f
- and x2, x2, #~0x1f
- cbz x7, 32f // multiple of 32 bytes
-
- and x8, x7, #0xf
- ldp x3, x4, [x1]
- add x8, x8, x1
- add x1, x1, x7
- ldp x5, x6, [x8]
- byteorder x3, \be
- byteorder x4, \be
- byteorder x5, \be
- byteorder x6, \be
- bitorder x3, \be
- bitorder x4, \be
- bitorder x5, \be
- bitorder x6, \be
-
- tst x7, #8
- crc32\c\()x w8, w0, x3
- csel x3, x3, x4, eq
- csel w0, w0, w8, eq
- tst x7, #4
- lsr x4, x3, #32
- crc32\c\()w w8, w0, w3
- csel x3, x3, x4, eq
- csel w0, w0, w8, eq
- tst x7, #2
- lsr w4, w3, #16
- crc32\c\()h w8, w0, w3
- csel w3, w3, w4, eq
- csel w0, w0, w8, eq
- tst x7, #1
- crc32\c\()b w8, w0, w3
- csel w0, w0, w8, eq
- tst x7, #16
- crc32\c\()x w8, w0, x5
- crc32\c\()x w8, w8, x6
- csel w0, w0, w8, eq
- cbz x2, 0f
-
-32: ldp x3, x4, [x1], #32
- sub x2, x2, #32
- ldp x5, x6, [x1, #-16]
- byteorder x3, \be
- byteorder x4, \be
- byteorder x5, \be
- byteorder x6, \be
- bitorder x3, \be
- bitorder x4, \be
- bitorder x5, \be
- bitorder x6, \be
- crc32\c\()x w0, w0, x3
- crc32\c\()x w0, w0, x4
- crc32\c\()x w0, w0, x5
- crc32\c\()x w0, w0, x6
- cbnz x2, 32b
-0: bitorder w0, \be
- ret
-
-8: tbz x2, #3, 4f
- ldr x3, [x1], #8
- byteorder x3, \be
- bitorder x3, \be
- crc32\c\()x w0, w0, x3
-4: tbz x2, #2, 2f
- ldr w3, [x1], #4
- byteorder w3, \be
- bitorder w3, \be
- crc32\c\()w w0, w0, w3
-2: tbz x2, #1, 1f
- ldrh w3, [x1], #2
- byteorder16 w3, \be
- bitorder16 w3, \be
- crc32\c\()h w0, w0, w3
-1: tbz x2, #0, 0f
- ldrb w3, [x1]
- bitorder8 w3, \be
- crc32\c\()b w0, w0, w3
-0: bitorder w0, \be
- ret
- .endm
-
- .align 5
-SYM_FUNC_START(crc32_le)
-alternative_if_not ARM64_HAS_CRC32
- b crc32_le_base
-alternative_else_nop_endif
- __crc32
-SYM_FUNC_END(crc32_le)
-
- .align 5
-SYM_FUNC_START(__crc32c_le)
-alternative_if_not ARM64_HAS_CRC32
- b __crc32c_le_base
-alternative_else_nop_endif
- __crc32 c
-SYM_FUNC_END(__crc32c_le)
-
- .align 5
-SYM_FUNC_START(crc32_be)
-alternative_if_not ARM64_HAS_CRC32
- b crc32_be_base
-alternative_else_nop_endif
- __crc32 be=1
-SYM_FUNC_END(crc32_be)
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 78b87a64ca0a..2432683e48a6 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -24,7 +24,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
- if (unlikely(len == 0))
+ if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;
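
The do_csum() guard is tightened from len == 0 to len <= 0 because len is a signed int: a negative value would slip past the old check and then behave as an enormous count once it meets the unsigned pointer arithmetic below. A standalone illustration of that failure mode (not kernel code):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            int len = -1;

            /* A negative int converted to size_t wraps to a huge value,
             * which is why do_csum() now rejects len <= 0 up front. */
            printf("%zu\n", (size_t)len);   /* 18446744073709551615 on LP64 */
            return 0;
    }
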
diff --git a/arch/arm64/lib/delay.c b/arch/arm64/lib/delay.c
index 5b7890139bc2..cb2062e7e234 100644
--- a/arch/arm64/lib/delay.c
+++ b/arch/arm64/lib/delay.c
@@ -27,7 +27,7 @@ void __delay(unsigned long cycles)
{
cycles_t start = get_cycles();
- if (cpus_have_const_cap(ARM64_HAS_WFXT)) {
+ if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) {
u64 end = start + cycles;
/*
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index 924934cb85ee..4e298baddc2e 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
@@ -385,6 +386,9 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
insn = aarch64_insn_get_ldr_reg_value();
break;
+ case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET:
+ insn = aarch64_insn_get_signed_ldr_reg_value();
+ break;
case AARCH64_INSN_LDST_STORE_REG_OFFSET:
insn = aarch64_insn_get_str_reg_value();
break;
@@ -430,6 +434,9 @@ u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
insn = aarch64_insn_get_ldr_imm_value();
break;
+ case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET:
+ insn = aarch64_insn_get_signed_load_imm_value();
+ break;
case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
insn = aarch64_insn_get_str_imm_value();
break;
@@ -534,6 +541,35 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
offset >> shift);
}
+u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_ACQ:
+ insn = aarch64_insn_get_load_acq_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_REL:
+ insn = aarch64_insn_get_store_rel_value();
+ break;
+ default:
+ pr_err("%s: unknown load-acquire/store-release encoding %d\n",
+ __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ reg);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+}
+
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register base,
enum aarch64_insn_register state,
@@ -1465,43 +1501,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
-u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
{
- u32 opt;
- u32 insn;
-
switch (type) {
case AARCH64_INSN_MB_SY:
- opt = 0xf;
- break;
+ return 0xf;
case AARCH64_INSN_MB_ST:
- opt = 0xe;
- break;
+ return 0xe;
case AARCH64_INSN_MB_LD:
- opt = 0xd;
- break;
+ return 0xd;
case AARCH64_INSN_MB_ISH:
- opt = 0xb;
- break;
+ return 0xb;
case AARCH64_INSN_MB_ISHST:
- opt = 0xa;
- break;
+ return 0xa;
case AARCH64_INSN_MB_ISHLD:
- opt = 0x9;
- break;
+ return 0x9;
case AARCH64_INSN_MB_NSH:
- opt = 0x7;
- break;
+ return 0x7;
case AARCH64_INSN_MB_NSHST:
- opt = 0x6;
- break;
+ return 0x6;
case AARCH64_INSN_MB_NSHLD:
- opt = 0x5;
- break;
+ return 0x5;
default:
- pr_err("%s: unknown dmb type %d\n", __func__, type);
+ pr_err("%s: unknown barrier type %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
+}
+
+u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+{
+ u32 opt;
+ u32 insn;
+
+ opt = __get_barrier_crm_val(type);
+ if (opt == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
insn = aarch64_insn_get_dmb_value();
insn &= ~GENMASK(11, 8);
@@ -1509,3 +1543,29 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
return insn;
}
+
+u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
+{
+ u32 opt, insn;
+
+ opt = __get_barrier_crm_val(type);
+ if (opt == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
+
+ insn = aarch64_insn_get_dsb_base_value();
+ insn &= ~GENMASK(11, 8);
+ insn |= (opt << 8);
+
+ return insn;
+}
+
+u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+ enum aarch64_insn_system_register sysreg)
+{
+ u32 insn = aarch64_insn_get_mrs_value();
+
+ insn &= ~GENMASK(19, 0);
+ insn |= sysreg << 5;
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT,
+ insn, result);
+}
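
The three new generators follow the established insn.c contract: each returns a ready-to-emit 32-bit encoding, or AARCH64_BREAK_FAULT on invalid input. A hedged usage sketch for a JIT-style caller; the AARCH64_INSN_* spellings follow asm/insn.h but should be double-checked, and emit_two() is illustrative:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/insn.h>

    /* Emit "ldar w0, [x1]" followed by "dsb ish" into a JIT buffer. */
    static int emit_two(u32 *buf)
    {
            u32 ldar, dsb;

            ldar = aarch64_insn_gen_load_acq_store_rel(AARCH64_INSN_REG_0,
                                                       AARCH64_INSN_REG_1,
                                                       AARCH64_INSN_SIZE_32,
                                                       AARCH64_INSN_LDST_LOAD_ACQ);
            dsb = aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH);
            if (ldar == AARCH64_BREAK_FAULT || dsb == AARCH64_BREAK_FAULT)
                    return -EINVAL;

            buf[0] = ldar;
            buf[1] = dsb;
            return 0;
    }
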
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 4ab48d49c451..9b99106fb95f 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -57,7 +57,7 @@
The loop tail is handled by always copying 64 bytes from the end.
*/
-SYM_FUNC_START(__pi_memcpy)
+SYM_FUNC_START_LOCAL(__pi_memcpy_generic)
add srcend, src, count
add dstend, dstin, count
cmp count, 128
@@ -238,7 +238,24 @@ L(copy64_from_start):
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstin]
ret
+SYM_FUNC_END(__pi_memcpy_generic)
+
+#ifdef CONFIG_AS_HAS_MOPS
+ .arch_extension mops
+SYM_FUNC_START(__pi_memcpy)
+alternative_if_not ARM64_HAS_MOPS
+ b __pi_memcpy_generic
+alternative_else_nop_endif
+
+ mov dst, dstin
+ cpyp [dst]!, [src]!, count!
+ cpym [dst]!, [src]!, count!
+ cpye [dst]!, [src]!, count!
+ ret
SYM_FUNC_END(__pi_memcpy)
+#else
+SYM_FUNC_ALIAS(__pi_memcpy, __pi_memcpy_generic)
+#endif
SYM_FUNC_ALIAS(__memcpy, __pi_memcpy)
EXPORT_SYMBOL(__memcpy)
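
CPYP/CPYM/CPYE mirror the SET* pattern: a prologue/main/epilogue triple that jointly performs one copy, updating the destination, source and count registers as it goes — which is why the patch first copies dstin into a scratch dst, so x0 survives as the return value. A hedged inline-asm rendering of the fast path (mops_memcpy is illustrative; the kernel reaches this code through the ARM64_HAS_MOPS alternative rather than a runtime branch):

    #include <stddef.h>

    static inline void *mops_memcpy(void *dst, const void *src, size_t n)
    {
            void *d = dst;

            asm volatile(
            ".arch_extension mops\n"
            "       cpyp    [%0]!, [%1]!, %2!\n"    /* prologue */
            "       cpym    [%0]!, [%1]!, %2!\n"    /* main */
            "       cpye    [%0]!, [%1]!, %2!\n"    /* epilogue */
            : "+r" (d), "+r" (src), "+r" (n) : : "cc", "memory");
            return dst;
    }

A C-level analogue of the alternative would be a runtime "does this CPU have MOPS" check, except that the patched branch costs nothing once the boot-time decision has been made.
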
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index a5aebe82ad73..97157da65ec6 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -26,6 +26,7 @@
*/
dstin .req x0
+val_x .req x1
val .req w1
count .req x2
tmp1 .req x3
@@ -42,7 +43,7 @@ dst .req x8
tmp3w .req w9
tmp3 .req x9
-SYM_FUNC_START(__pi_memset)
+SYM_FUNC_START_LOCAL(__pi_memset_generic)
mov dst, dstin /* Preserve return value. */
and A_lw, val, #255
orr A_lw, A_lw, A_lw, lsl #8
@@ -201,7 +202,24 @@ SYM_FUNC_START(__pi_memset)
ands count, count, zva_bits_x
b.ne .Ltail_maybe_long
ret
+SYM_FUNC_END(__pi_memset_generic)
+
+#ifdef CONFIG_AS_HAS_MOPS
+ .arch_extension mops
+SYM_FUNC_START(__pi_memset)
+alternative_if_not ARM64_HAS_MOPS
+ b __pi_memset_generic
+alternative_else_nop_endif
+
+ mov dst, dstin
+ setp [dst]!, count!, val_x
+ setm [dst]!, count!, val_x
+ sete [dst]!, count!, val_x
+ ret
SYM_FUNC_END(__pi_memset)
+#else
+SYM_FUNC_ALIAS(__pi_memset, __pi_memset_generic)
+#endif
SYM_FUNC_ALIAS(__memset, __pi_memset)
EXPORT_SYMBOL(__memset)
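
One subtlety versus the generic path: __pi_memset_generic replicates the fill byte across a register (the orr ... lsl #8 sequence), whereas the SET* instructions consume only the low byte of the value operand — my reading of the architecture, so treat as hedged — which is why the new val_x alias for x1 can be passed straight through. A sketch (mops_memset is illustrative):

    #include <stddef.h>

    /* Only bits [7:0] of "c" are significant to SETP/SETM/SETE. */
    static inline void *mops_memset(void *dst, int c, size_t n)
    {
            void *d = dst;

            asm volatile(
            ".arch_extension mops\n"
            "       setp    [%0]!, %1!, %2\n"
            "       setm    [%0]!, %1!, %2\n"
            "       sete    [%0]!, %1!, %2\n"
            : "+r" (d), "+r" (n)
            : "r" ((unsigned long)c)
            : "cc", "memory");
            return dst;
    }
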
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index baee22961bdb..7510d1a23124 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -19,12 +19,6 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
- size_t len)
-{
- memcpy_flushcache(to, page_address(page) + offset, len);
-}
-
unsigned long __copy_user_flushcache(void *to, const void __user *from,
unsigned long n)
{
diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c
index 96b171995d19..8fffebfa17b2 100644
--- a/arch/arm64/lib/xor-neon.c
+++ b/arch/arm64/lib/xor-neon.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <asm/neon-intrinsics.h>
-void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
{
uint64_t *dp1 = (uint64_t *)p1;
@@ -37,7 +37,7 @@ void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3)
{
@@ -73,7 +73,7 @@ void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4)
@@ -118,7 +118,7 @@ void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4,
@@ -319,7 +319,7 @@ static void xor_arm64_eor3_5(unsigned long bytes,
static int __init xor_neon_init(void)
{
- if (IS_ENABLED(CONFIG_AS_HAS_SHA3) && cpu_have_named_feature(SHA3)) {
+ if (cpu_have_named_feature(SHA3)) {
xor_block_inner_neon.do_3 = xor_arm64_eor3_3;
xor_block_inner_neon.do_4 = xor_arm64_eor3_4;
xor_block_inner_neon.do_5 = xor_arm64_eor3_5;
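
With the helpers now static and the IS_ENABLED(CONFIG_AS_HAS_SHA3) test gone, EOR3 selection rests solely on the runtime cpu_have_named_feature(SHA3) check — presumably because the kernel's minimum toolchain now always provides SHA3 assembler support, though that is an inference from the hunk rather than something it states. For reference, FEAT_SHA3's EOR3 folds a three-way XOR into a single instruction; a minimal ACLE-intrinsic sketch, assuming a compiler targeting armv8.2-a+sha3 (the kernel itself may emit the instruction differently):

    #include <arm_neon.h>

    /* Three-way XOR of one 128-bit vector; with FEAT_SHA3 this compiles
     * to a single EOR3 instead of two dependent EORs. */
    static inline uint64x2_t xor3(uint64x2_t a, uint64x2_t b, uint64x2_t c)
    {
            return veor3q_u64(a, b, c);
    }
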