author    Linus Torvalds <torvalds@linux-foundation.org>  2016-05-22 17:21:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-22 17:21:27 -0700
commit    bd28b14591b98f696bc9f94c5ba2e598ca487dfd (patch)
tree      5cd165412fa7dec2dbbac28ff6d8d5b12d3011f4
parent    5b09c3edecd37ec1a52fbd5ae97a19734edc7a77 (diff)
x86: remove more uaccess_32.h complexity
I'm looking at trying to possibly merge the 32-bit and 64-bit versions of the x86 uaccess.h implementation, but first this needs to be cleaned up.

For example, the 32-bit version of "__copy_from_user_inatomic()" is mostly the special cases for the constant size, and it's actually almost never relevant. Most users aren't actually using a constant size anyway, and the few cases that do small constant copies are better off just using __get_user() instead.

So get rid of the unnecessary complexity.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
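The three caller-side conversions below all follow the same shape. As a minimal sketch of that shape (the names val, uptr and err are hypothetical, not taken from this patch): a constant-size __copy_from_user_inatomic() of 1, 2 or 4 bytes becomes a __get_user(), which derives the access width from the type of its first argument.

    u32 val;
    int err;

    /* Old style: constant-size inatomic copy of one 32-bit value. */
    pagefault_disable();
    err = __copy_from_user_inatomic(&val, uptr, sizeof(val));
    pagefault_enable();

    /* New style: __get_user() picks the 4-byte access from the type of val. */
    pagefault_disable();
    err = __get_user(val, uptr);
    pagefault_enable();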
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 26
-rw-r--r--  kernel/events/uprobes.c           |  3
-rw-r--r--  kernel/futex.c                    |  2
-rw-r--r--  mm/maccess.c                      |  3
4 files changed, 3 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 537cc883ea29..4b32da24faaf 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -65,32 +65,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-        /* Avoid zeroing the tail if the copy fails..
-         * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
-         * but as the zeroing behaviour is only significant when n is not
-         * constant, that shouldn't be a problem.
-         */
-        if (__builtin_constant_p(n)) {
-                unsigned long ret;
-
-                switch (n) {
-                case 1:
-                        __uaccess_begin();
-                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
-                        __uaccess_end();
-                        return ret;
-                case 2:
-                        __uaccess_begin();
-                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
-                        __uaccess_end();
-                        return ret;
-                case 4:
-                        __uaccess_begin();
-                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
-                        __uaccess_end();
-                        return ret;
-                }
-        }
         return __copy_from_user_ll_nozero(to, from, n);
 }
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7edc95edfaee..c01f733ff2e1 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1694,8 +1694,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
         int result;
 
         pagefault_disable();
-        result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
-                        sizeof(opcode));
+        result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
         pagefault_enable();
 
         if (likely(result == 0))
diff --git a/kernel/futex.c b/kernel/futex.c
index c20f06f38ef3..ee25f5ba4aca 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -729,7 +729,7 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
         int ret;
 
         pagefault_disable();
-        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
+        ret = __get_user(*dest, from);
         pagefault_enable();
 
         return ret ? -EFAULT : 0;
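A note on the return values, sketched with hypothetical variables (v, left and err are illustrative; from is the u32 __user pointer from the hunk above): the two helpers report failure differently, but both report it as a non-zero value, so the existing ret-to-errno mapping needs no adjustment.

    u32 v;
    unsigned long left;
    int err;

    /* Returns 0 on success, otherwise the number of bytes NOT copied. */
    left = __copy_from_user_inatomic(&v, from, sizeof(v));

    /* Returns 0 on success, otherwise -EFAULT. */
    err = __get_user(v, from);

    /* Either way, "ret ? -EFAULT : 0" maps a fault to -EFAULT. */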
diff --git a/mm/maccess.c b/mm/maccess.c
index d159b1c96e48..78f9274dd49d 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -96,8 +96,7 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
         pagefault_disable();
 
         do {
-                ret = __copy_from_user_inatomic(dst++,
-                                (const void __user __force *)src++, 1);
+                ret = __get_user(*dst++, (const char __user __force *)src++);
         } while (dst[-1] && ret == 0 && src - unsafe_addr < count);
 
         dst[-1] = '\0';
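For context, a hedged usage sketch of the function this last hunk touches (the caller, buffer and pointer names are hypothetical, not from this patch): strncpy_from_unsafe() copies a NUL-terminated string byte by byte from an address that may fault, and per the code above returns -EFAULT if an access faults.

    /* Hypothetical tracing-style caller, for illustration only. */
    char buf[64];
    long len;

    len = strncpy_from_unsafe(buf, (const void *)maybe_bad_ptr, sizeof(buf));
    if (len < 0)
        pr_debug("string at %p not readable\n", maybe_bad_ptr);
    else
        pr_debug("read \"%s\"\n", buf);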