Diffstat (limited to 'arch/x86/include/asm/uaccess_64.h')
-rw-r--r--  arch/x86/include/asm/uaccess_64.h  187
1 file changed, 153 insertions(+), 34 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index e7265a552f4f..915124011c27 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -11,6 +11,101 @@
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
+#include <asm/percpu.h>
+
+#ifdef MODULE
+ #define runtime_const_ptr(sym) (sym)
+#else
+ #include <asm/runtime-const.h>
+#endif
+extern unsigned long USER_PTR_MAX;
+
+#ifdef CONFIG_ADDRESS_MASKING
+/*
+ * Mask out tag bits from the address.
+ */
+static inline unsigned long __untagged_addr(unsigned long addr)
+{
+ asm_inline (ALTERNATIVE("", "and " __percpu_arg([mask]) ", %[addr]",
+ X86_FEATURE_LAM)
+ : [addr] "+r" (addr)
+ : [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));
+
+ return addr;
+}
+
+#define untagged_addr(addr) ({ \
+ unsigned long __addr = (__force unsigned long)(addr); \
+ (__force __typeof__(addr))__untagged_addr(__addr); \
+})
+
+static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
+ unsigned long addr)
+{
+ mmap_assert_locked(mm);
+ return addr & (mm)->context.untag_mask;
+}
+
+#define untagged_addr_remote(mm, addr) ({ \
+ unsigned long __addr = (__force unsigned long)(addr); \
+ (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \
+})
+
+#endif
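
For illustration, a sketch of how a caller might use untagged_addr() on the current task (find_user_vma() is a hypothetical name; find_vma(), mmap_assert_locked() and current->mm are the usual <linux/mm.h> / <linux/sched.h> helpers, and the caller is assumed to hold the mmap lock):

        /* Illustrative sketch, not part of this patch: strip LAM tag bits
         * before treating a user-supplied value as a virtual address. */
        static struct vm_area_struct *find_user_vma(unsigned long tagged_addr)
        {
                unsigned long addr = untagged_addr(tagged_addr);

                mmap_assert_locked(current->mm);
                return find_vma(current->mm, addr);
        }
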
+
+#define valid_user_address(x) \
+ likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
+
+/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+static inline void __user *mask_user_address(const void __user *ptr)
+{
+ void __user *ret;
+ asm("cmp %1,%0\n\t"
+ "cmova %1,%0"
+ :"=r" (ret)
+ :"r" (runtime_const_ptr(USER_PTR_MAX)),
+ "0" (ptr));
+ return ret;
+}
+#define masked_user_access_begin(x) ({ \
+ auto __masked_ptr = (x); \
+ __masked_ptr = mask_user_address(__masked_ptr); \
+ __uaccess_begin(); __masked_ptr; })
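
The intended calling pattern, as a hedged sketch modeled on in-tree users (get_user_word() is a hypothetical name; can_do_masked_user_access(), user_read_access_begin(), unsafe_get_user() and user_access_end() come from the generic uaccess machinery):

        /* Sketch only: mask the pointer instead of branching on a range
         * check, then perform the dense unsafe access. */
        static int get_user_word(unsigned long *val, const unsigned long __user *from)
        {
                if (can_do_masked_user_access())
                        from = masked_user_access_begin(from);
                else if (!user_read_access_begin(from, sizeof(*from)))
                        return -EFAULT;

                unsafe_get_user(*val, from, Efault);
                user_access_end();
                return 0;
        Efault:
                user_access_end();
                return -EFAULT;
        }
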
+
+/*
+ * User pointers can have tag bits on x86-64. This scheme tolerates
+ * arbitrary values in those bits rather than masking them off.
+ *
+ * Enforce two rules:
+ * 1. 'ptr' must be in the user part of the address space
+ * 2. 'ptr+size' must not overflow into kernel addresses
+ *
+ * Note that we always have at least one guard page between the
+ * max user address and the non-canonical gap, allowing us to
+ * ignore small sizes entirely.
+ *
+ * In fact, we could probably remove the size check entirely, since
+ * any kernel accesses will be in increasing address order starting
+ * at 'ptr'.
+ *
+ * That's a separate optimization, for now just handle the small
+ * constant case.
+ */
+static inline bool __access_ok(const void __user *ptr, unsigned long size)
+{
+ if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
+ return valid_user_address(ptr);
+ } else {
+ unsigned long sum = size + (__force unsigned long)ptr;
+
+ return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
+ }
+}
+#define __access_ok __access_ok
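
A hedged illustration of the two branches (u64_ok() and buf_ok() are hypothetical callers; access_ok() is the generic wrapper that resolves to this __access_ok(), and u64 comes from <linux/types.h>):

        /* Compile-time size <= PAGE_SIZE: only the start pointer is checked;
         * the guard page past the maximum user address absorbs the overrun. */
        static bool u64_ok(const u64 __user *uptr)
        {
                return access_ok(uptr, sizeof(u64));
        }

        /* Runtime size: additionally rejects 'ptr + size' wrapping around
         * or crossing into kernel addresses. */
        static bool buf_ok(const void __user *buf, unsigned long len)
        {
                return access_ok(buf, len);
        }
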
/*
 * Copy To/From Userspace
 */
@@ -18,32 +113,26 @@
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
-__must_check unsigned long
-copy_user_generic_string(void *to, const void *from, unsigned len);
-__must_check unsigned long
-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
+rep_movs_alternative(void *to, const void *from, unsigned len);
static __always_inline __must_check unsigned long
-copy_user_generic(void *to, const void *from, unsigned len)
+copy_user_generic(void *to, const void *from, unsigned long len)
{
- unsigned ret;
-
+ stac();
/*
- * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
- * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
- * Otherwise, use copy_user_generic_unrolled.
+ * If CPU has FSRM feature, use 'rep movs'.
+ * Otherwise, use rep_movs_alternative.
*/
- alternative_call_2(copy_user_generic_unrolled,
- copy_user_generic_string,
- X86_FEATURE_REP_GOOD,
- copy_user_enhanced_fast_string,
- X86_FEATURE_ERMS,
- ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
- "=d" (len)),
- "1" (to), "2" (from), "3" (len)
- : "memory", "rcx", "r8", "r9", "r10", "r11");
- return ret;
+ asm volatile(
+ "1:\n\t"
+ ALTERNATIVE("rep movsb",
+ "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
+ "2:\n"
+ _ASM_EXTABLE_UA(1b, 2b)
+ :"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
+ : : "memory", "rax");
+ clac();
+ return len;
}
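
The residual-count convention carries through to the public API: copy_user_generic() hands back the number of bytes left uncopied in len, with 0 meaning complete success. A hedged caller sketch (fetch_u64() is hypothetical):

        static int fetch_u64(u64 *dst, const u64 __user *src)
        {
                /* copy_from_user() returns the bytes it could NOT copy */
                return copy_from_user(dst, src, sizeof(*dst)) ? -EFAULT : 0;
        }
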
static __always_inline __must_check unsigned long
@@ -58,26 +147,19 @@ raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
return copy_user_generic((__force void *)dst, src, size);
}
-static __always_inline __must_check
-unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
-{
- return copy_user_generic((__force void *)dst,
- (__force void *)src, size);
-}
-
-extern long __copy_user_nocache(void *dst, const void __user *src,
- unsigned size, int zerorest);
-
+extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
-extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
- size_t len);
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size)
{
+ long ret;
kasan_check_write(dst, size);
- return __copy_user_nocache(dst, src, size, 0);
+ stac();
+ ret = __copy_user_nocache(dst, src, size);
+ clac();
+ return ret;
}
static inline int
@@ -86,4 +168,41 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
kasan_check_write(dst, size);
return __copy_user_flushcache(dst, src, size);
}
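
A hedged sketch of how the _inatomic_ nocache variant might be wrapped (stage_from_user() is hypothetical): no might_fault() check is done on this path, so the caller is assumed to disable page faults itself and treat the return value as a residual byte count.

        static unsigned stage_from_user(void *dst, const void __user *src,
                                        unsigned len)
        {
                unsigned left;

                if (!access_ok(src, len))
                        return len;

                pagefault_disable();
                left = __copy_from_user_inatomic_nocache(dst, src, len);
                pagefault_enable();

                return left;    /* bytes NOT copied */
        }
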
+
+/*
+ * Zero Userspace.
+ */
+
+__must_check unsigned long
+rep_stos_alternative(void __user *addr, unsigned long len);
+
+static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+ might_fault();
+ stac();
+
+ /*
+ * No memory constraint because it doesn't change any memory gcc
+ * knows about.
+ */
+ asm volatile(
+ "1:\n\t"
+ ALTERNATIVE("rep stosb",
+ "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
+ "2:\n"
+ _ASM_EXTABLE_UA(1b, 2b)
+ : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
+ : "a" (0));
+
+ clac();
+
+ return size;
+}
+
+static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
+{
+ if (__access_ok(to, n))
+ return __clear_user(to, n);
+ return n;
+}
#endif /* _ASM_X86_UACCESS_64_H */
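
Finally, a hedged sketch of the usual clear_user() pattern (zero_user_tail() is hypothetical), e.g. zeroing the untouched tail of a user buffer after a short read; both clear_user() and __clear_user() return the number of bytes that could not be zeroed:

        static int zero_user_tail(void __user *buf, unsigned long copied,
                                  unsigned long total)
        {
                if (copied >= total)
                        return 0;
                /* non-zero return: part of the tail was not writable */
                return clear_user(buf + copied, total - copied) ? -EFAULT : 0;
        }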