Diffstat (limited to 'arch/arm/include/asm/uaccess.h')
-rw-r--r--  arch/arm/include/asm/uaccess.h  271
1 files changed, 156 insertions, 115 deletions
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a13d90206472..d6ae80b5df36 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -8,10 +8,14 @@
 /*
  * User space memory access functions
  */
+#include <linux/kernel.h>
 #include <linux/string.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/domain.h>
+#include <linux/unaligned.h>
 #include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
 #include <asm/compiler.h>
 
 #include <asm/extable.h>
@@ -22,9 +26,10 @@
  * perform such accesses (eg, via list poison values) which could then
  * be exploited for priviledge escalation.
  */
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
 static __always_inline unsigned int uaccess_save_and_enable(void)
 {
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	unsigned int old_domain = get_domain();
 
 	/* Set the current domain access to permit user accesses */
@@ -32,66 +37,57 @@ static __always_inline unsigned int uaccess_save_and_enable(void)
 		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
 
 	return old_domain;
-#else
-	return 0;
-#endif
 }
 
 static __always_inline void uaccess_restore(unsigned int flags)
 {
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/* Restore the user access mask */
 	set_domain(flags);
-#endif
 }
 
-/*
- * These two are intentionally not defined anywhere - if the kernel
- * code generates any references to them, that's a bug.
- */
-extern int __get_user_bad(void);
-extern int __put_user_bad(void);
+#elif defined(CONFIG_CPU_TTBR0_PAN)
 
-/*
- * Note that this is actually 0x1,0000,0000
- */
-#define KERNEL_DS	0x00000000
-
-#ifdef CONFIG_MMU
-
-#define USER_DS		TASK_SIZE
-#define get_fs()	(current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
+static __always_inline unsigned int uaccess_save_and_enable(void)
 {
-	current_thread_info()->addr_limit = fs;
+	unsigned int old_ttbcr = cpu_get_ttbcr();
 
 	/*
-	 * Prevent a mispredicted conditional call to set_fs from forwarding
-	 * the wrong address limit to access_ok under speculation.
+	 * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+	 * TTBR0 (A1 = 0).
 	 */
-	dsb(nsh);
+	cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
 	isb();
 
-	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+	return old_ttbcr;
 }
 
-#define uaccess_kernel()	(get_fs() == KERNEL_DS)
+static inline void uaccess_restore(unsigned int flags)
+{
+	cpu_set_ttbcr(flags);
+	isb();
+}
+
+#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+	return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
+#endif
 
 /*
- * We use 33-bit arithmetic here. Success returns zero, failure returns
- * addr_limit. We take advantage that addr_limit will be zero for KERNEL_DS,
- * so this will always return success in that case.
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
  */
-#define __range_ok(addr, size) ({ \
-	unsigned long flag, roksum; \
-	__chk_user_ptr(addr); \
-	__asm__(".syntax unified\n" \
-	"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
-	: "=&r" (flag), "=&r" (roksum) \
-	: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
-	: "cc"); \
-	flag; })
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+#ifdef CONFIG_MMU
 
 /*
  * This is a type: either unsigned long, if the argument fits into
@@ -120,7 +116,7 @@ static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
 	"	subshs	%1, %1, %2\n"
 	"	movlo	%0, #0\n"
 	: "+r" (safe_ptr), "=&r" (tmp)
-	: "r" (size), "r" (current_thread_info()->addr_limit)
+	: "r" (size), "r" (TASK_SIZE)
 	: "cc");
 
 	csdb();
@@ -147,16 +143,6 @@ extern int __get_user_64t_1(void *);
 extern int __get_user_64t_2(void *);
 extern int __get_user_64t_4(void *);
 
-#define __GUP_CLOBBER_1 "lr", "cc"
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define __GUP_CLOBBER_2 "ip", "lr", "cc"
-#else
-#define __GUP_CLOBBER_2 "lr", "cc"
-#endif
-#define __GUP_CLOBBER_4 "lr", "cc"
-#define __GUP_CLOBBER_32t_8 "lr", "cc"
-#define __GUP_CLOBBER_8 "lr", "cc"
-
 #define __get_user_x(__r2, __p, __e, __l, __s) \
 	__asm__ __volatile__ ( \
 		__asmeq("%0", "r0") __asmeq("%1", "r2") \
@@ -164,7 +150,7 @@ extern int __get_user_64t_4(void *);
 		"bl	__get_user_" #__s \
 		: "=&r" (__e), "=r" (__r2) \
 		: "0" (__p), "r" (__l) \
-		: __GUP_CLOBBER_##__s)
+		: "ip", "lr", "cc")
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
@@ -186,7 +172,7 @@ extern int __get_user_64t_4(void *);
 		"bl	__get_user_64t_" #__s \
 		: "=&r" (__e), "=r" (__r2) \
 		: "0" (__p), "r" (__l) \
-		: __GUP_CLOBBER_##__s)
+		: "ip", "lr", "cc")
 #else
 #define __get_user_x_64t __get_user_x
 #endif
@@ -194,12 +180,13 @@ extern int __get_user_64t_4(void *);
 
 #define __get_user_check(x, p) \
 	({ \
-		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		unsigned long __limit = TASK_SIZE - 1; \
 		register typeof(*(p)) __user *__p asm("r0") = (p); \
 		register __inttype(x) __r2 asm("r2"); \
 		register unsigned long __l asm("r1") = __limit; \
 		register int __e asm("r0"); \
 		unsigned int __ua_flags = uaccess_save_and_enable(); \
+		int __tmp_e; \
 		switch (sizeof(*(__p))) { \
 		case 1: \
 			if (sizeof((x)) >= 8) \
@@ -227,9 +214,10 @@ extern int __get_user_64t_4(void *);
 			break; \
 		default: __e = __get_user_bad(); break; \
 		} \
+		__tmp_e = __e; \
 		uaccess_restore(__ua_flags); \
 		x = (typeof(*(p))) __r2; \
-		__e; \
+		__tmp_e; \
 	})
 
 #define get_user(x, p) \
@@ -245,7 +233,7 @@ extern int __put_user_8(void *, unsigned long long);
 
 #define __put_user_check(__pu_val, __ptr, __err, __s) \
 	({ \
-		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		unsigned long __limit = TASK_SIZE - 1; \
 		register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
 		register const void __user *__p asm("r0") = __ptr; \
 		register unsigned long __l asm("r1") = __limit; \
@@ -262,29 +250,12 @@ extern int __put_user_8(void *, unsigned long long);
 #else	/* CONFIG_MMU */
 
-/*
- * uClinux has only one addr space, so has simplified address limits.
- */
-#define USER_DS			KERNEL_DS
-
-#define uaccess_kernel()	(true)
-#define __addr_ok(addr)		((void)(addr), 1)
-#define __range_ok(addr, size)	((void)(addr), 0)
-#define get_fs()		(KERNEL_DS)
-
-static inline void set_fs(mm_segment_t fs)
-{
-}
-
 #define get_user(x, p)	__get_user(x, p)
 #define __put_user_check __put_user_nocheck
 
 #endif /* CONFIG_MMU */
 
-#define access_ok(addr, size)	(__range_ok(addr, size) == 0)
-
-#define user_addr_max() \
-	(uaccess_kernel() ? ~0UL : get_fs())
+#include <asm-generic/access_ok.h>
 
 #ifdef CONFIG_CPU_SPECTRE
 /*
  * When mitigating Spectre variant 1, it is not worth fixing the non-
@@ -308,31 +279,40 @@ static inline void set_fs(mm_segment_t fs)
 #define __get_user(x, ptr) \
 ({ \
 	long __gu_err = 0; \
-	__get_user_err((x), (ptr), __gu_err); \
+	__get_user_err((x), (ptr), __gu_err, TUSER()); \
 	__gu_err; \
 })
 
-#define __get_user_err(x, ptr, err) \
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+#define __get_user_err(x, ptr, err, __t) \
 do { \
 	unsigned long __gu_addr = (unsigned long)(ptr); \
-	unsigned long __gu_val; \
+	__long_type(x) __gu_val; \
 	unsigned int __ua_flags; \
 	__chk_user_ptr(ptr); \
 	might_fault(); \
 	__ua_flags = uaccess_save_and_enable(); \
 	switch (sizeof(*(ptr))) { \
-	case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
-	case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
-	case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
+	case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
+	case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
+	case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
+	case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break; \
 	default: (__gu_val) = __get_user_bad(); \
 	} \
 	uaccess_restore(__ua_flags); \
 	(x) = (__typeof__(*(ptr)))__gu_val; \
 } while (0)
+#endif
 
 #define __get_user_asm(x, addr, err, instr) \
 	__asm__ __volatile__( \
-	"1:	" TUSER(instr) " %1, [%2], #0\n" \
+	"1:	" instr " %1, [%2], #0\n" \
 	"2:\n" \
 	"	.pushsection .text.fixup,\"ax\"\n" \
 	"	.align	2\n" \
@@ -348,40 +328,54 @@ do { \
 	: "r" (addr), "i" (-EFAULT) \
 	: "cc")
 
-#define __get_user_asm_byte(x, addr, err) \
-	__get_user_asm(x, addr, err, ldrb)
+#define __get_user_asm_byte(x, addr, err, __t) \
+	__get_user_asm(x, addr, err, "ldrb" __t)
 
 #if __LINUX_ARM_ARCH__ >= 6
 
-#define __get_user_asm_half(x, addr, err) \
-	__get_user_asm(x, addr, err, ldrh)
+#define __get_user_asm_half(x, addr, err, __t) \
+	__get_user_asm(x, addr, err, "ldrh" __t)
 
 #else
 
 #ifndef __ARMEB__
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
 ({ \
 	unsigned long __b1, __b2; \
-	__get_user_asm_byte(__b1, __gu_addr, err); \
-	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
+	__get_user_asm_byte(__b1, __gu_addr, err, __t); \
+	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
 	(x) = __b1 | (__b2 << 8); \
 })
 #else
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
 	unsigned long __b1, __b2; \
-	__get_user_asm_byte(__b1, __gu_addr, err); \
-	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
+	__get_user_asm_byte(__b1, __gu_addr, err, __t); \
+	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
 	(x) = (__b1 << 8) | __b2; \
 })
 #endif
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
-#define __get_user_asm_word(x, addr, err) \
-	__get_user_asm(x, addr, err, ldr)
+#define __get_user_asm_word(x, addr, err, __t) \
+	__get_user_asm(x, addr, err, "ldr" __t)
+
+#ifdef __ARMEB__
+#define __WORD0_OFFS 4
+#define __WORD1_OFFS 0
+#else
+#define __WORD0_OFFS 0
+#define __WORD1_OFFS 4
 #endif
+
+#define __get_user_asm_dword(x, addr, err, __t) \
+	({ \
+	unsigned long __w0, __w1; \
+	__get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
+	__get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
+	(x) = ((u64)__w1 << 32) | (u64) __w0; \
+})
 
 #define __put_user_switch(x, ptr, __err, __fn) \
 	do { \
@@ -425,7 +419,7 @@ do { \
 #define __put_user_nocheck(x, __pu_ptr, __err, __size) \
 	do { \
 		unsigned long __pu_addr = (unsigned long)__pu_ptr; \
-		__put_user_nocheck_##__size(x, __pu_addr, __err); \
+		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
 	} while (0)
 
 #define __put_user_nocheck_1 __put_user_asm_byte
@@ -433,9 +427,11 @@
 #define __put_user_nocheck_4 __put_user_asm_word
 #define __put_user_nocheck_8 __put_user_asm_dword
 
+#endif /* !CONFIG_CPU_SPECTRE */
+
 #define __put_user_asm(x, __pu_addr, err, instr) \
 	__asm__ __volatile__( \
-	"1:	" TUSER(instr) " %1, [%2], #0\n" \
+	"1:	" instr " %1, [%2], #0\n" \
 	"2:\n" \
 	"	.pushsection .text.fixup,\"ax\"\n" \
 	"	.align	2\n" \
@@ -450,36 +446,36 @@ do { \
 	: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
 	: "cc")
 
-#define __put_user_asm_byte(x, __pu_addr, err) \
-	__put_user_asm(x, __pu_addr, err, strb)
+#define __put_user_asm_byte(x, __pu_addr, err, __t) \
+	__put_user_asm(x, __pu_addr, err, "strb" __t)
 
 #if __LINUX_ARM_ARCH__ >= 6
 
-#define __put_user_asm_half(x, __pu_addr, err) \
-	__put_user_asm(x, __pu_addr, err, strh)
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+	__put_user_asm(x, __pu_addr, err, "strh" __t)
 
 #else
 
 #ifndef __ARMEB__
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
 ({ \
 	unsigned long __temp = (__force unsigned long)(x); \
-	__put_user_asm_byte(__temp, __pu_addr, err); \
-	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
+	__put_user_asm_byte(__temp, __pu_addr, err, __t); \
+	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
 })
 #else
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
 ({ \
 	unsigned long __temp = (__force unsigned long)(x); \
-	__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
-	__put_user_asm_byte(__temp, __pu_addr + 1, err); \
+	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
+	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
 })
 #endif
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
-#define __put_user_asm_word(x, __pu_addr, err) \
-	__put_user_asm(x, __pu_addr, err, str)
+#define __put_user_asm_word(x, __pu_addr, err, __t) \
+	__put_user_asm(x, __pu_addr, err, "str" __t)
 
 #ifndef __ARMEB__
 #define __reg_oper0	"%R2"
@@ -489,12 +485,12 @@
 #define __reg_oper1	"%R2"
 #endif
 
-#define __put_user_asm_dword(x, __pu_addr, err) \
+#define __put_user_asm_dword(x, __pu_addr, err, __t) \
 	__asm__ __volatile__( \
- ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
- ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
- THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
- THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
+ ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"	) \
+ ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"	) \
+ THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"	) \
+ THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"	) \
 	"3:\n" \
 	"	.pushsection .text.fixup,\"ax\"\n" \
 	"	.align	2\n" \
@@ -510,7 +506,52 @@ do { \
 	: "r" (x), "i" (-EFAULT) \
 	: "cc")
 
-#endif /* !CONFIG_CPU_SPECTRE */
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+	const type *__pk_ptr = (src); \
+	unsigned long __src = (unsigned long)(__pk_ptr); \
+	type __val; \
+	int __err = 0; \
+	switch (sizeof(type)) { \
+	case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
+	case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
+	case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
+	case 8: { \
+		u32 *__v32 = (u32*)&__val; \
+		__get_user_asm_word(__v32[0], __src, __err, ""); \
+		if (__err) \
+			break; \
+		__get_user_asm_word(__v32[1], __src+4, __err, ""); \
+		break; \
+	} \
+	default: __err = __get_user_bad(); break; \
+	} \
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
+		put_unaligned(__val, (type *)(dst)); \
+	else \
+		*(type *)(dst) = __val; /* aligned by caller */ \
+	if (__err) \
+		goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+	const type *__pk_ptr = (dst); \
+	unsigned long __dst = (unsigned long)__pk_ptr; \
+	int __err = 0; \
+	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+		     ? get_unaligned((type *)(src)) \
+		     : *(type *)(src); /* aligned by caller */ \
+	switch (sizeof(type)) { \
+	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
+	case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
+	case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
+	case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
+	default: __err = __put_user_bad(); break; \
+	} \
+	if (__err) \
+		goto err_label; \
+} while (0)
 
 #ifdef CONFIG_MMU
 extern unsigned long __must_check
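For orientation, an editor's sketch (not code from this commit): every accessor in the diff follows the same open/access/close pattern around uaccess_save_and_enable()/uaccess_restore(), which this change backs with software-domain PAN, TTBR0 PAN, or no-ops instead of the removed set_fs()/addr_limit bookkeeping. The function below, probe_user_byte(), is hypothetical and simply mirrors what the __get_user_err() macro above expands to for a one-byte user load; it assumes the internal __get_user_asm_byte() and TUSER() macros shown in the diff.

/* Illustrative sketch only -- mirrors a 1-byte __get_user_err() expansion. */
static inline int probe_user_byte(const unsigned char __user *uaddr,
				  unsigned char *val)
{
	unsigned long tmp;
	long err = 0;
	/* Open the user-access window (domain switch or TTBR0 re-enable). */
	unsigned int ua_flags = uaccess_save_and_enable();

	/* "ldrb" TUSER() picks the unprivileged ldrbt form on domain-based
	 * kernels, plain ldrb otherwise; a fault sets err to -EFAULT via the
	 * exception-table fixup emitted by __get_user_asm(). */
	__get_user_asm_byte(tmp, (unsigned long)uaddr, err, TUSER());

	/* Close the window again before returning to normal kernel code. */
	uaccess_restore(ua_flags);

	*val = tmp;
	return err;	/* 0 on success, -EFAULT on fault */
}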

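Likewise, a hedged usage sketch for the new __get_kernel_nofault() macro added at the end of the diff; the caller below, peek_kernel_u32(), is hypothetical. The macro takes a goto label as its last argument and jumps there if the faulting access is caught by the exception-table fixup, which is the calling shape the generic kernel nofault copy helpers expect. Because the instruction suffix passed down is the empty string "", plain privileged loads are used and no PAN window is opened.

/* Hypothetical caller, for illustration -- not part of the patch. */
static int peek_kernel_u32(const u32 *src, u32 *dst)
{
	u32 val;

	/* On a fault, control transfers to the Efault label below. */
	__get_kernel_nofault(&val, src, u32, Efault);

	*dst = val;
	return 0;

Efault:
	return -EFAULT;
}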