Diffstat (limited to 'arch/xtensa/include/asm/uaccess.h')
-rw-r--r--  arch/xtensa/include/asm/uaccess.h | 67
1 file changed, 9 insertions(+), 58 deletions(-)
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 848a3d736bcb..2e7bac0d4b2c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -16,14 +16,9 @@
#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H
-#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/types.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-#include <linux/sched.h>
+#include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should
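[Note on the hunk above: the VERIFY_READ/VERIFY_WRITE constants are not lost.
In this era of the tree they had just been lifted into the generic
include/linux/uaccess.h, so the arch header no longer needs to carry them; a
sketch of the generic definitions, matching the deleted lines:

	#define VERIFY_READ	0
	#define VERIFY_WRITE	1

The new #include <asm/extable.h> likewise replaces the open-coded exception
table struct deleted in the last hunk below.]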
@@ -43,7 +38,7 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
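[Note on the hunk above: uaccess_kernel() is the new generic spelling of the
old open-coded segment test; in linux/uaccess.h of this era it expands, unless
an arch overrides it, to exactly what was deleted:

	#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

The surviving __user_ok() range check is easy to verify by hand. A standalone,
compilable sketch -- the TASK_SIZE value is illustrative (it happens to be the
xtensa-with-MMU value), not taken from this patch:

	#include <stdio.h>

	#define TASK_SIZE 0x40000000UL	/* illustrative value */
	#define __user_ok(addr, size) \
		(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))

	int main(void)
	{
		/* 1: range ends exactly at TASK_SIZE, so it fits */
		printf("%d\n", __user_ok(0x3fffff00UL, 0x100UL));
		/* 0: range would cross TASK_SIZE, so it is rejected */
		printf("%d\n", __user_ok(0x3fffff00UL, 0x200UL));
		return 0;
	}
]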
@@ -239,60 +234,22 @@ __asm__ __volatile__( \
* Copy to/from user space
*/
-/*
- * We use a generic, arbitrary-sized copy subroutine. The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions. __xtensa_copy_user is quite efficient. See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
-
extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
-
static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
- return __copy_user(to, from, n);
+ prefetchw(to);
+ return __xtensa_copy_user(to, (__force const void *)from, n);
}
-
static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
prefetch(from);
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_user(to, from, n);
- return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
- prefetchw(to);
- if (access_ok(VERIFY_READ, from, n))
- return __copy_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
+ return __xtensa_copy_user((__force void *)to, from, n);
}
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
- __generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
- __generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* We need to return the number of bytes not cleared. Our memset()
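[Note on the hunk above: this is the heart of the change. The arch now exports
only raw_copy_{to,from}_user() (keeping its prefetch hints) and lets the
generic layer supply the access_ok() check and the zeroing of the uncopied
tail that the deleted __generic_copy_from_user() did by hand. A paraphrase,
not verbatim, of the generic wrapper in include/linux/uaccess.h of this era,
which INLINE_COPY_FROM_USER asks the kernel to emit inline (the helper name
here is invented for illustration):

	static inline unsigned long
	generic_copy_from_user_sketch(void *to, const void __user *from,
				      unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n); /* arch hook above */
		if (unlikely(res))
			memset(to + (n - res), 0, res); /* zero the uncopied tail */
		return res;	/* bytes NOT copied; 0 on full success */
	}
]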
@@ -348,10 +305,4 @@ static inline long strnlen_user(const char *str, long len)
return __strnlen_user(str, len);
}
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
#endif /* _XTENSA_UACCESS_H */
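[Note on the final hunk: the open-coded struct exception_table_entry can go
because the #include <asm/extable.h> added at the top pulls in the generic
definition. A sketch of what include/asm-generic/extable.h provides in this
era, for reference:

	struct exception_table_entry
	{
		unsigned long insn, fixup;	/* faulting insn + landing pad */
	};

	struct pt_regs;
	extern int fixup_exception(struct pt_regs *regs);
]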