From 5e6039d8a307d8411422c154f3d446b44fa32b6d Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 27 Dec 2016 18:00:15 -0500
Subject: uaccess: move VERIFY_{READ,WRITE} definitions to linux/uaccess.h

Signed-off-by: Al Viro
---
 arch/xtensa/include/asm/asm-uaccess.h | 3 ---
 arch/xtensa/include/asm/uaccess.h     | 3 ---
 2 files changed, 6 deletions(-)

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
index a7a110039786..dfdf9fae1f84 100644
--- a/arch/xtensa/include/asm/asm-uaccess.h
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -19,9 +19,6 @@
 #include
 #include
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
 #include
 #include
 #include
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 848a3d736bcb..dd6b13649aad 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -20,9 +20,6 @@
 #include
 #include
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
 #include
 
 /*
-- cgit

From af1d5b37d6211c814fac0d5d0b71ec695618054a Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 27 Dec 2016 18:14:09 -0500
Subject: uaccess: drop duplicate includes from asm/uaccess.h

Signed-off-by: Al Viro
---
 arch/xtensa/include/asm/uaccess.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index dd6b13649aad..bd8861c811ef 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -16,12 +16,9 @@
 #ifndef _XTENSA_UACCESS_H
 #define _XTENSA_UACCESS_H
 
-#include
 #include
 #include
 
-#include
-
 /*
  * The fs value determines whether argument validity checking should
  * be performed or not.  If get_fs() == USER_DS, checking is
-- cgit

From 9849a5697d3defb2087cb6b9be5573a142697889 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Thu, 9 Mar 2017 17:24:05 +0300
Subject: arch, mm: convert all architectures to use 5level-fixup.h

If an architecture uses 4level-fixup.h we don't need to do anything as
it includes 5level-fixup.h.

If an architecture uses pgtable-nop*d.h, define __ARCH_USE_5LEVEL_HACK
before inclusion of the header. It makes the asm-generic code use
5level-fixup.h.

If an architecture has 4-level paging or folds levels on its own,
include 5level-fixup.h directly.

Signed-off-by: Kirill A. Shutemov
Acked-by: Michal Hocko
Signed-off-by: Linus Torvalds
---
 arch/xtensa/include/asm/pgtable.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 8aa0e0d9cbb2..30dd5b2e4ad5 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_PGTABLE_H
 #define _XTENSA_PGTABLE_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include
 #include
 #include
-- cgit
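The second case described in the message above (an architecture that includes one of the
asm-generic pgtable-nop*d.h headers) comes down to the ordering shown in the xtensa hunk:
the macro must be visible before the asm-generic header is pulled in. A minimal illustrative
sketch, with the header name assumed here (it varies per architecture):

	/* arch/<arch>/include/asm/pgtable.h -- illustrative sketch only, not a real file */
	#define __ARCH_USE_5LEVEL_HACK		/* must come before the nop*d header */
	#include <asm-generic/pgtable-nopmd.h>	/* assumed header; with the macro set, asm-generic falls back to 5level-fixup.h */

The xtensa change above is the concrete instance of this pattern.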
From db68ce10c4f0a27c1ff9fa0e789e5c41f8c4ea63 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 20 Mar 2017 21:08:07 -0400
Subject: new helper: uaccess_kernel()

Signed-off-by: Al Viro
---
 arch/xtensa/include/asm/uaccess.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index bd8861c811ef..26512692e28f 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -37,7 +37,7 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
 	(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
-- cgit

From 0b46a94e84c1323d54f8b82eacd3143400fb9521 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sun, 25 Dec 2016 14:34:44 -0500
Subject: xtensa: switch to generic extable.h

Signed-off-by: Al Viro
---
 arch/xtensa/include/asm/Kbuild    | 1 +
 arch/xtensa/include/asm/uaccess.h | 7 +------
 2 files changed, 2 insertions(+), 6 deletions(-)

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index f41408c53fe1..cc23e9ecc6bb 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fcntl.h
 generic-y += hardirq.h
 generic-y += ioctl.h
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 26512692e28f..0f338774af99 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -18,6 +18,7 @@
 #include
 #include
+#include
 
 /*
  * The fs value determines whether argument validity checking should
  * be performed or not.  If get_fs() == USER_DS, checking is
@@ -342,10 +343,4 @@ static inline long strnlen_user(const char *str, long len)
 	return __strnlen_user(str, len);
 }
 
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
 #endif	/* _XTENSA_UACCESS_H */
-- cgit

From 3a0e75adecc8da026a5befb2c5828d08c999373c Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Wed, 22 Mar 2017 13:02:41 -0400
Subject: xtensa: get rid of zeroing, use RAW_COPY_USER

Signed-off-by: Al Viro
---
 arch/xtensa/Kconfig               |   1 +
 arch/xtensa/include/asm/uaccess.h |  54 +++---------
 arch/xtensa/lib/usercopy.S        | 116 ++++++++++++++++----------------
 3 files changed, 57 insertions(+), 114 deletions(-)

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f4126cf997a4..043d37d45919 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -29,6 +29,7 @@ config XTENSA
 	select NO_BOOTMEM
 	select PERF_USE_VMALLOC
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER
 	help
 	  Xtensa processors are 32-bit RISC machines designed by Tensilica
 	  primarily for embedded systems.  These processors are both
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 0f338774af99..8e93ed8ad1fe 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -234,60 +234,22 @@ __asm__ __volatile__( \
  * Copy to/from user space
  */
 
-/*
- * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions.  __xtensa_copy_user is quite efficient.  See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
-
 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
-
 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
-	prefetch(from);
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
+	prefetchw(to);
+	return __xtensa_copy_user(to, (__force const void *)from, n);
 }
-
 static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	prefetchw(to);
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
+	prefetchw(from);
+	return __xtensa_copy_user((__force void *)to, from, n);
 }
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
-	__generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
-	__generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 /*
  * We need to return the number of bytes not cleared.  Our memset()
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 7ea4dd68893e..d9cd766bde3e 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -102,9 +102,9 @@ __xtensa_copy_user:
 	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte
 
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 	addi	a4, a4, -1
 	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
@@ -112,11 +112,11 @@ __xtensa_copy_user:
 .Ldst2mod4:	# dst 16-bit aligned
 	# copy 2 bytes
 	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(l8ui, a7, a3, 1, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(l8ui, a7, a3, 1, fixup)
 	addi	a3, a3, 2
-	EX(s8i, a6, a5, 0, s_fixup)
-	EX(s8i, a7, a5, 1, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
+	EX(s8i, a7, a5, 1, fixup)
 	addi	a5, a5, 2
 	addi	a4, a4, -2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
@@ -135,9 +135,9 @@ __xtensa_copy_user:
 	add	a7, a3, a4	# a7 = end address for source
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a7, .Lnextbyte
@@ -161,15 +161,15 @@ __xtensa_copy_user:
 	add	a8, a8, a3	# a8 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-	EX(l32i, a6, a3,  0, l_fixup)
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(l32i, a6, a3,  8, l_fixup)
-	EX(s32i, a7, a5,  4, s_fixup)
-	EX(l32i, a7, a3, 12, l_fixup)
-	EX(s32i, a6, a5,  8, s_fixup)
+	EX(l32i, a6, a3,  0, fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(l32i, a6, a3,  8, fixup)
+	EX(s32i, a7, a5,  4, fixup)
+	EX(l32i, a7, a3, 12, fixup)
+	EX(s32i, a6, a5,  8, fixup)
 	addi	a3, a3, 16
-	EX(s32i, a7, a5, 12, s_fixup)
+	EX(s32i, a7, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a8, .Loop1
@@ -177,31 +177,31 @@ __xtensa_copy_user:
 .Loop1done:
 	bbci.l	a4, 3, .L2
 	# copy 8 bytes
-	EX(l32i, a6, a3,  0, l_fixup)
-	EX(l32i, a7, a3,  4, l_fixup)
+	EX(l32i, a6, a3,  0, fixup)
+	EX(l32i, a7, a3,  4, fixup)
 	addi	a3, a3, 8
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(s32i, a7, a5,  4, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(s32i, a7, a5,  4, fixup)
 	addi	a5, a5, 8
 .L2:
 	bbci.l	a4, 2, .L3
 	# copy 4 bytes
-	EX(l32i, a6, a3,  0, l_fixup)
+	EX(l32i, a6, a3,  0, fixup)
 	addi	a3, a3, 4
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a5, a5, 4
 .L3:
 	bbci.l	a4, 1, .L4
 	# copy 2 bytes
-	EX(l16ui, a6, a3,  0, l_fixup)
+	EX(l16ui, a6, a3,  0, fixup)
 	addi	a3, a3, 2
-	EX(s16i, a6, a5,  0, s_fixup)
+	EX(s16i, a6, a5,  0, fixup)
 	addi	a5, a5, 2
 .L4:
 	bbci.l	a4, 0, .L5
 	# copy 1 byte
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(s8i, a6, a5,  0, s_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(s8i, a6, a5,  0, fixup)
 .L5:
 	movi	a2, 0	# return success for len bytes copied
 	retw
@@ -217,7 +217,7 @@ __xtensa_copy_user:
 	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 	and	a10, a3, a8	# save unalignment offset for below
 	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
-	EX(l32i, a6, a3, 0, l_fixup)	# load first word
+	EX(l32i, a6, a3, 0, fixup)	# load first word
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, .Loop2done
 #else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +226,19 @@ __xtensa_copy_user:
 	add	a12, a12, a3	# a12 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop2:
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(l32i, a8, a3,  8, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(l32i, a8, a3,  8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(l32i, a9, a3, 12, l_fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(l32i, a9, a3, 12, fixup)
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5,  4, s_fixup)
-	EX(l32i, a6, a3, 16, l_fixup)
+	EX(s32i, a7, a5,  4, fixup)
+	EX(l32i, a6, a3, 16, fixup)
 	ALIGN(	a8, a8, a9)
-	EX(s32i, a8, a5,  8, s_fixup)
+	EX(s32i, a8, a5,  8, fixup)
 	addi	a3, a3, 16
 	ALIGN(	a9, a9, a6)
-	EX(s32i, a9, a5, 12, s_fixup)
+	EX(s32i, a9, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a12, .Loop2
@@ -246,39 +246,39 @@ __xtensa_copy_user:
 .Loop2done:
 	bbci.l	a4, 3, .L12
 	# copy 8 bytes
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(l32i, a8, a3,  8, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(l32i, a8, a3,  8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a3, a3, 8
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5,  4, s_fixup)
+	EX(s32i, a7, a5,  4, fixup)
 	addi	a5, a5, 8
 	mov	a6, a8
 .L12:
 	bbci.l	a4, 2, .L13
 	# copy 4 bytes
-	EX(l32i, a7, a3,  4, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
 	addi	a3, a3, 4
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a5, a5, 4
 	mov	a6, a7
 .L13:
 	add	a3, a3, a10	# readjust a3 with correct misalignment
 	bbci.l	a4, 1, .L14
 	# copy 2 bytes
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(l8ui, a7, a3,  1, l_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(l8ui, a7, a3,  1, fixup)
 	addi	a3, a3, 2
-	EX(s8i, a6, a5,  0, s_fixup)
-	EX(s8i, a7, a5,  1, s_fixup)
+	EX(s8i, a6, a5,  0, fixup)
+	EX(s8i, a7, a5,  1, fixup)
 	addi	a5, a5, 2
 .L14:
 	bbci.l	a4, 0, .L15
 	# copy 1 byte
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(s8i, a6, a5,  0, s_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(s8i, a6, a5,  0, fixup)
 .L15:
 	movi	a2, 0	# return success for len bytes copied
 	retw
@@ -291,30 +291,10 @@ __xtensa_copy_user:
  * bytes_copied = a5 - a2
  * retval = bytes_not_copied = original len - bytes_copied
  * retval = a11 - (a5 - a2)
- *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes.  This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
  */
-.Lmemset:
-	.word	memset
-
-s_fixup:
+fixup:
 	sub	a2, a5, a2	/* a2 <-- bytes copied */
 	sub	a2, a11, a2	/* a2 <-- bytes not copied */
 	retw
-
-l_fixup:
-	sub	a2, a5, a2	/* a2 <-- bytes copied */
-	sub	a2, a11, a2	/* a2 <-- bytes not copied == return value */
-
-	/* void *memset(void *s, int c, size_t n); */
-	mov	a6, a5	/* s */
-	movi	a7, 0	/* c */
-	mov	a8, a2	/* n */
-	l32r	a4, .Lmemset
-	callx4	a4
-	/* Ignore memset return value in a6. */
-	/* a2 still contains bytes not copied. */
-	retw
-- cgit
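With ARCH_HAS_RAW_COPY_USER selected, the arch helpers above only report how many bytes they
failed to copy; the access_ok() checks and the zeroing of the uncopied tail move into the
generic linux/uaccess.h wrappers. A rough sketch of what the generic copy_from_user() does
with the arch helper (simplified, not the exact generic implementation; access_ok() is shown
in its three-argument form of this era):

	static inline unsigned long
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (access_ok(VERIFY_READ, from, n))
			res = raw_copy_from_user(to, from, n);	/* returns bytes NOT copied */
		if (res)
			memset(to + (n - res), 0, res);		/* zero the uncopied tail */
		return res;
	}

This is why the memset-based l_fixup path could be dropped from usercopy.S above: zeroing the
destination on a fault is no longer the architecture's job.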
From 7d4914db8fda6d38d92b1b8a740bafbd6c6d89a1 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Tue, 4 Apr 2017 13:26:29 -0700
Subject: xtensa: fix prefetch in the raw_copy_to_user

'from' is the input buffer; it should be prefetched with prefetch,
not prefetchw.

Tested-by: Max Filippov
Signed-off-by: Max Filippov
Signed-off-by: Al Viro
---
 arch/xtensa/include/asm/uaccess.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/xtensa')

diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 8e93ed8ad1fe..2e7bac0d4b2c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -245,7 +245,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 static inline unsigned long
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	prefetchw(from);
+	prefetch(from);
 	return __xtensa_copy_user((__force void *)to, from, n);
 }
 #define INLINE_COPY_FROM_USER
-- cgit
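prefetch() hints that a cache line is about to be read, while prefetchw() hints that it will
also be written, so the natural pairing in these helpers is roughly (a sketch mirroring the
code above, not new API):

	prefetchw(to);		/* destination buffer will be written */
	prefetch(from);		/* source buffer is only read */

raw_copy_from_user() already prefetched its destination with prefetchw(); the fix above makes
raw_copy_to_user() prefetch its read-only source with prefetch() instead.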