Diffstat (limited to 'arch/arm/include/asm/uaccess.h')
-rw-r--r--	arch/arm/include/asm/uaccess.h	120
1 file changed, 77 insertions(+), 43 deletions(-)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 36fbc3329252..d6ae80b5df36 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -8,10 +8,14 @@
 /*
  * User space memory access functions
  */
+#include <linux/kernel.h>
 #include <linux/string.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/domain.h>
+#include <linux/unaligned.h>
 #include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
 #include <asm/compiler.h>
 
 #include <asm/extable.h>
@@ -22,9 +26,10 @@
  * perform such accesses (eg, via list poison values) which could then
  * be exploited for priviledge escalation.
  */
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
 static __always_inline unsigned int uaccess_save_and_enable(void)
 {
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	unsigned int old_domain = get_domain();
 
 	/* Set the current domain access to permit user accesses */
@@ -32,19 +37,49 @@ static __always_inline unsigned int uaccess_save_and_enable(void)
 		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
 
 	return old_domain;
-#else
-	return 0;
-#endif
 }
 
 static __always_inline void uaccess_restore(unsigned int flags)
 {
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/* Restore the user access mask */
 	set_domain(flags);
-#endif
 }
 
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+	unsigned int old_ttbcr = cpu_get_ttbcr();
+
+	/*
+	 * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+	 * TTBR0 (A1 = 0).
+	 */
+	cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+	isb();
+
+	return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+	cpu_set_ttbcr(flags);
+	isb();
+}
+
+#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+	return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
+#endif
+
 /*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
@@ -55,21 +90,6 @@ extern int __put_user_bad(void);
 #ifdef CONFIG_MMU
 
 /*
- * We use 33-bit arithmetic here. Success returns zero, failure returns
- * addr_limit. We take advantage that addr_limit will be zero for KERNEL_DS,
- * so this will always return success in that case.
- */
-#define __range_ok(addr, size) ({ \
-	unsigned long flag, roksum; \
-	__chk_user_ptr(addr); \
-	__asm__(".syntax unified\n" \
-	"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
-	: "=&r" (flag), "=&r" (roksum) \
-	: "r" (addr), "Ir" (size), "0" (TASK_SIZE) \
-	: "cc"); \
-	flag; })
-
-/*
  * This is a type: either unsigned long, if the argument fits into
  * that type, or otherwise unsigned long long.
  */
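The three uaccess_save_and_enable()/uaccess_restore() variants above are selected at compile time: software domain switching (CONFIG_CPU_SW_DOMAIN_PAN), disabling TTBR0 page-table walks on LPAE (CONFIG_CPU_TTBR0_PAN), or no-ops when neither PAN scheme is configured. Every raw user access in this header is bracketed by the pair; a minimal sketch of that calling pattern (sketch_copy_from_user() is illustrative only, assuming arm_copy_from_user() as the raw copy routine, as in arch/arm/lib):

static inline unsigned long __must_check
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* open the user-access window: domain switch or TTBR0 walk enable */
	unsigned int __ua_flags = uaccess_save_and_enable();

	n = arm_copy_from_user(to, from, n);	/* faults fixed up via extable */

	/* close the window so stray kernel dereferences of user memory fault */
	uaccess_restore(__ua_flags);
	return n;
}

Under CONFIG_CPU_TTBR0_PAN the window works by clearing TTBCR.EPD0 (plus T0SZ and A1), which re-enables translation-table walks through TTBR0, the user half of the address space; writing back the saved TTBCR value makes user addresses unreachable from kernel context again.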
@@ -123,16 +143,6 @@ extern int __get_user_64t_1(void *);
 extern int __get_user_64t_2(void *);
 extern int __get_user_64t_4(void *);
 
-#define __GUP_CLOBBER_1	"lr", "cc"
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define __GUP_CLOBBER_2	"ip", "lr", "cc"
-#else
-#define __GUP_CLOBBER_2 "lr", "cc"
-#endif
-#define __GUP_CLOBBER_4	"lr", "cc"
-#define __GUP_CLOBBER_32t_8 "lr", "cc"
-#define __GUP_CLOBBER_8	"lr", "cc"
-
 #define __get_user_x(__r2, __p, __e, __l, __s)			\
 	   __asm__ __volatile__ (				\
 		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
@@ -140,7 +150,7 @@ extern int __get_user_64t_4(void *);
 		"bl	__get_user_" #__s			\
 		: "=&r" (__e), "=r" (__r2)			\
 		: "0" (__p), "r" (__l)				\
-		: __GUP_CLOBBER_##__s)
+		: "ip", "lr", "cc")
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
@@ -162,7 +172,7 @@ extern int __get_user_64t_4(void *);
 		"bl	__get_user_64t_" #__s			\
 		: "=&r" (__e), "=r" (__r2)			\
 		: "0" (__p), "r" (__l)				\
-		: __GUP_CLOBBER_##__s)
+		: "ip", "lr", "cc")
 #else
 #define __get_user_x_64t __get_user_x
 #endif
@@ -240,15 +250,12 @@ extern int __put_user_8(void *, unsigned long long);
 
 #else /* CONFIG_MMU */
 
-#define __addr_ok(addr)		((void)(addr), 1)
-#define __range_ok(addr, size)	((void)(addr), 0)
-
 #define get_user(x, p)	__get_user(x, p)
 #define __put_user_check __put_user_nocheck
 
 #endif /* CONFIG_MMU */
 
-#define access_ok(addr, size)	(__range_ok(addr, size) == 0)
+#include <asm-generic/access_ok.h>
 
 #ifdef CONFIG_CPU_SPECTRE
 /*
@@ -276,10 +283,17 @@ extern int __put_user_8(void *, unsigned long long);
 	__gu_err;						\
 })
 
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
 #define __get_user_err(x, ptr, err, __t)			\
 do {								\
 	unsigned long __gu_addr = (unsigned long)(ptr);		\
-	unsigned long __gu_val;					\
+	__long_type(x) __gu_val;				\
 	unsigned int __ua_flags;				\
 	__chk_user_ptr(ptr);					\
 	might_fault();						\
@@ -288,6 +302,7 @@ do {								\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break;	\
+	case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break;	\
 	default: (__gu_val) = __get_user_bad();			\
 	}							\
 	uaccess_restore(__ua_flags);				\
@@ -346,6 +361,22 @@ do {								\
 #define __get_user_asm_word(x, addr, err, __t)			\
 	__get_user_asm(x, addr, err, "ldr" __t)
 
+#ifdef __ARMEB__
+#define __WORD0_OFFS	4
+#define __WORD1_OFFS	0
+#else
+#define __WORD0_OFFS	0
+#define __WORD1_OFFS	4
+#endif
+
+#define __get_user_asm_dword(x, addr, err, __t)			\
+	({							\
+	unsigned long __w0, __w1;				\
+	__get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
+	__get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
+	(x) = ((u64)__w1 << 32) | (u64) __w0;			\
+})
+
 #define __put_user_switch(x, ptr, __err, __fn)			\
 do {								\
 	const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
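__get_user_asm_dword() above composes the new 8-byte get_user() case out of two 4-byte loads, with __WORD0_OFFS/__WORD1_OFFS choosing which half sits at the lower address, while __long_type() keeps the temporary as wide as the destination so the 64-bit result is not truncated. A plain-C restatement of the same composition (a sketch: load_word() is a hypothetical stand-in for the __get_user_asm() accessor):

/* assumed helper: 32-bit load returning 0 on success, -EFAULT on fault */
static int load_word(const char *addr, u32 *out);

static int load_dword(const char *addr, u64 *out)
{
	u32 w0, w1;	/* w0 = bits 0..31, w1 = bits 32..63 */
	int err = 0;

#ifdef __ARMEB__
	err |= load_word(addr + 4, &w0);	/* big-endian: low word at offset 4 */
	err |= load_word(addr + 0, &w1);
#else
	err |= load_word(addr + 0, &w0);	/* little-endian: low word at offset 0 */
	err |= load_word(addr + 4, &w1);
#endif
	*out = ((u64)w1 << 32) | w0;
	return err;
}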
@@ -475,8 +506,6 @@ do {								\
 		: "r" (x), "i" (-EFAULT)			\
 		: "cc")
 
-#define HAVE_GET_KERNEL_NOFAULT
-
 #define __get_kernel_nofault(dst, src, type, err_label)		\
 do {								\
 	const type *__pk_ptr = (src);				\
@@ -497,7 +526,10 @@ do {								\
 	}							\
 	default: __err = __get_user_bad(); break;		\
 	}							\
-	*(type *)(dst) = __val;					\
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))	\
+		put_unaligned(__val, (type *)(dst));		\
+	else							\
+		*(type *)(dst) = __val; /* aligned by caller */	\
 	if (__err)						\
 		goto err_label;					\
 } while (0)
@@ -507,7 +539,9 @@ do {								\
 	const type *__pk_ptr = (dst);				\
 	unsigned long __dst = (unsigned long)__pk_ptr;		\
 	int __err = 0;						\
-	type __val = *(type *)src;				\
+	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+			? get_unaligned((type *)(src))		\
+			: *(type *)(src); /* aligned by caller */ \
 	switch (sizeof(type)) {					\
 	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
 	case 2: __put_user_asm_half(__val, __dst, __err, ""); break;	\
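Both kernel-nofault accessors now pick between an alignment-tolerant access and a plain dereference with IS_ENABLED(), which expands to a compile-time 0 or 1 so the untaken branch is discarded while both branches must still compile. The same pattern in isolation (a sketch with a hypothetical store_u32(), not kernel code):

#include <linux/kernel.h>
#include <linux/unaligned.h>

static void store_u32(void *dst, u32 val)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		put_unaligned(val, (u32 *)dst);	/* dst may be misaligned */
	else
		*(u32 *)dst = val;		/* caller must align dst */
}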
