author    Al Viro <viro@zeniv.linux.org.uk>  2017-03-21 14:27:36 -0400
committer Al Viro <viro@zeniv.linux.org.uk>  2017-03-28 18:24:02 -0400
commit    23504bae7f3edd1484c4d470362f2b12bcd298f9
tree      ae7aaa8ee94fd9f195ef2a741f2f64e48ea75a95 /arch/tile/lib
parent    c0ea73f18c057b41801106cfd2ffaf9794681444
tile: get rid of zeroing, switch to RAW_COPY_USER
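With RAW_COPY_USER, the architecture supplies only raw_copy_{to,from}_user(),
each returning the number of bytes it failed to copy; zeroing the uncopied
tail of the kernel buffer on a faulting copy_from_user() happens once in the
generic wrapper instead of in per-arch code. Roughly (a simplified sketch of
the generic lib/usercopy.c wrapper of that era, not part of this patch):

	unsigned long _copy_from_user(void *to, const void __user *from,
				      unsigned long n)
	{
		unsigned long res = n;

		/* raw copy returns the number of bytes left uncopied */
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);

		/* zero whatever the raw copy could not fill in */
		if (unlikely(res))
			memset(to + (n - res), 0, res);
		return res;
	}

That memset() is the same tail-zeroing that __copy_from_user_zeroing() did in
arch/tile, so both the assembly fallback zeroing loop and the C wrapper below
can simply be deleted.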
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'arch/tile/lib')
-rw-r--r--  arch/tile/lib/exports.c         |  7
-rw-r--r--  arch/tile/lib/memcpy_32.S       | 41
-rw-r--r--  arch/tile/lib/memcpy_user_64.c  | 15
3 files changed, 19 insertions(+), 44 deletions(-)
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index c5369fe643c7..ecce8e177e3f 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
#ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
#endif
/* hypervisor glue */
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index a2771ae5da53..270f1267cd18 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -24,7 +24,6 @@
#define IS_MEMCPY 0
#define IS_COPY_FROM_USER 1
-#define IS_COPY_FROM_USER_ZEROING 2
#define IS_COPY_TO_USER -1
.section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
9
-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
* the user source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
*/
-ENTRY(__copy_from_user_inatomic)
-.type __copy_from_user_inatomic, @function
- FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+.type raw_copy_from_user, @function
+ FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
.text.memcpy_common, \
- .Lend_memcpy_common - __copy_from_user_inatomic)
+ .Lend_memcpy_common - raw_copy_from_user)
{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
- .size __copy_from_user_inatomic, . - __copy_from_user_inatomic
+ .size raw_copy_from_user, . - raw_copy_from_user
-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-.type __copy_from_user_zeroing, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
- { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
- .size __copy_from_user_zeroing, . - __copy_from_user_zeroing
-
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
* the kernel source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
*/
-ENTRY(__copy_to_user_inatomic)
-.type __copy_to_user_inatomic, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+.type raw_copy_to_user, @function
+ FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_COPY_TO_USER; j memcpy_common }
- .size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+ .size raw_copy_to_user, . - raw_copy_to_user
ENTRY(memcpy)
.type memcpy, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_MEMCPY }
.size memcpy, . - memcpy
/* Fall through */
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
{ bnzt r2, copy_from_user_fixup_loop }
.Lcopy_from_user_fixup_zero_remainder:
- { bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */
- /* byte-at-a-time loop faulted, so zero the rest. */
- { move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1: { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
- { bnzt r3, 1b }
-2: move lr, r27
+ move lr, r27
{ move r0, r2; jrp lr }
copy_to_user_fixup_loop:
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 97bbb6060b25..a3fea9fd973e 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -51,7 +51,7 @@
__v; \
})
-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
#define LD8 LD
#include "memcpy_64.c"
-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
#define ST1 ST
#define ST2 ST
#define ST4 ST
@@ -73,7 +73,7 @@
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"
-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
@@ -83,12 +83,3 @@
#define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
- unsigned long n)
-{
- unsigned long rc = __copy_from_user_inatomic(to, from, n);
- if (unlikely(rc))
- memset(to + n - rc, 0, rc);
- return rc;
-}