Diffstat (limited to 'arch/alpha/lib/ev6-memset.S')
-rw-r--r--  arch/alpha/lib/ev6-memset.S  15
1 file changed, 8 insertions, 7 deletions
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index fed21c6893e8..89d7809da4cc 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/alpha/lib/ev6-memset.S
*
@@ -17,7 +18,7 @@
* The algorithm for the leading and trailing quadwords remains the same,
* however the loop has been unrolled to enable better memory throughput,
* and the code has been replicated for each of the entry points: __memset
- * and __memsetw to permit better scheduling to eliminate the stalling
+ * and __memset16 to permit better scheduling to eliminate the stalling
* encountered during the mask replication.
* A future enhancement might be to put in a byte store loop for really
* small (say < 32 bytes) memset()s. Whether or not that change would be
@@ -26,14 +27,14 @@
* as fixes will need to be made in multiple places. The performance gain
* is worth it.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noat
.set noreorder
.text
.globl memset
.globl __memset
.globl ___memset
- .globl __memsetw
+ .globl __memset16
.globl __constant_c_memset

.ent ___memset
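(The header comment above describes the structure both entry points share:
a leading partial quadword, an unrolled quadword body, and a trailing
partial quadword. As a rough illustration only -- a hypothetical C sketch,
not the kernel's implementation, with qword_memset an invented name -- the
control flow looks like this:)

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/*
	 * Sketch of the leading/body/trailing scheme.  The real assembly
	 * merges the partial quadwords with masked quadword stores and
	 * unrolls the body for memory throughput; this only shows the
	 * overall shape.
	 */
	static void qword_memset(void *dst, uint8_t c, size_t n)
	{
		unsigned char *p = dst;
		uint64_t pat = 0x0101010101010101ULL * c; /* replicate c */

		/* leading partial quadword: bytes up to 8-byte alignment */
		while (n && ((uintptr_t)p & 7)) {
			*p++ = c;
			n--;
		}
		/* body: one aligned quadword store per 8 bytes */
		while (n >= 8) {
			memcpy(p, &pat, 8);
			p += 8;
			n -= 8;
		}
		/* trailing partial quadword */
		while (n--)
			*p++ = c;
	}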
@@ -414,9 +415,9 @@ end:
* to mask stalls. Note that entry point names also had to change
*/
.align 5
- .ent __memsetw
+ .ent __memset16
-__memsetw:
+__memset16:
.frame $30,0,$26,0
.prologue 0
@@ -595,8 +596,8 @@ end_w:
nop
ret $31,($26),1 # L0 :
- .end __memsetw
- EXPORT_SYMBOL(__memsetw)
+ .end __memset16
+ EXPORT_SYMBOL(__memset16)

memset = ___memset
__memset = ___memset
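(For reference, the "mask replication" the header comment mentions is the
widening of the fill value to a full 64-bit quadword by shift-and-OR. A
minimal sketch, assuming nothing beyond standard C -- replicate8 and
replicate16 are invented names, not kernel symbols:)

	#include <stdint.h>

	/* Byte entry point: three doubling steps. */
	static uint64_t replicate8(uint8_t c)
	{
		uint64_t v = c;
		v |= v << 8;	/* 0xab         -> 0xabab */
		v |= v << 16;	/* 0xabab       -> 0xabababab */
		v |= v << 32;	/* 0xabababab   -> 0xabababababababab */
		return v;
	}

	/* 16-bit entry point (__memset16): only two doubling steps. */
	static uint64_t replicate16(uint16_t c)
	{
		uint64_t v = c;
		v |= v << 16;	/* 0xabcd     -> 0xabcdabcd */
		v |= v << 32;	/* 0xabcdabcd -> 0xabcdabcdabcdabcd */
		return v;
	}

(Each shift/OR step depends on the previous one, so the steps form a
serially dependent chain; that dependency is the "stalling encountered
during the mask replication" that the header comment says the replicated,
separately scheduled entry points are meant to hide.)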