Diffstat (limited to 'arch/s390/include/asm/uaccess.h')
-rw-r--r--   arch/s390/include/asm/uaccess.h   790
1 files changed, 470 insertions, 320 deletions
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4e666f..c5e02addcd67 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com),
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/uaccess.h"
*/
@@ -12,374 +13,523 @@
/*
* User space memory access functions
*/
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <asm/ctl_reg.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
-
-
-#define KERNEL_DS MAKE_MM_SEG(0)
-#define USER_DS MAKE_MM_SEG(1)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current->thread.mm_segment)
-
-#define set_fs(x) \
-({ \
- unsigned long __pto; \
- current->thread.mm_segment = (x); \
- __pto = current->thread.mm_segment.ar4 ? \
- S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
- __ctl_load(__pto, 7, 7); \
-})
+#include <linux/pgtable.h>
+#include <asm/asm-extable.h>
+#include <asm/processor.h>
+#include <asm/extable.h>
+#include <asm/facility.h>
+#include <asm-generic/access_ok.h>
+#include <asm/asce.h>
+#include <linux/instrumented.h>
+
+void debug_user_asce(int exit);
+
+#ifdef CONFIG_KMSAN
+#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
+#else
+#define uaccess_kmsan_or_inline __always_inline
+#endif
-#define segment_eq(a,b) ((a).ar4 == (b).ar4)
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
-static inline int __range_ok(unsigned long addr, unsigned long size)
+static uaccess_kmsan_or_inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
- return 1;
+ unsigned long osize;
+ int cc;
+
+ while (1) {
+ osize = size;
+ asm_inline volatile(
+ " lhi %%r0,%[spec]\n"
+ "0: mvcos %[to],%[from],%[size]\n"
+ "1: nopr %%r7\n"
+ CC_IPM(cc)
+ EX_TABLE_UA_MVCOS_FROM(0b, 0b)
+ EX_TABLE_UA_MVCOS_FROM(1b, 0b)
+ : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
+ : [spec] "I" (0x81), [from] "Q" (*(const char __user *)from)
+ : CC_CLOBBER_LIST("memory", "0"));
+ if (__builtin_constant_p(osize) && osize <= 4096)
+ return osize - size;
+ if (likely(CC_TRANSFORM(cc) == 0))
+ return osize - size;
+ size -= 4096;
+ to += 4096;
+ from += 4096;
+ }
}
-#define __access_ok(addr, size) \
-({ \
- __chk_user_ptr(addr); \
- __range_ok((unsigned long)(addr), (size)); \
-})
-
-#define access_ok(type, addr, size) __access_ok(addr, size)
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
+static uaccess_kmsan_or_inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
- int insn, fixup;
-};
-
-static inline unsigned long extable_insn(const struct exception_table_entry *x)
-{
- return (unsigned long)&x->insn + x->insn;
+ unsigned long osize;
+ int cc;
+
+ while (1) {
+ osize = size;
+ asm_inline volatile(
+ " llilh %%r0,%[spec]\n"
+ "0: mvcos %[to],%[from],%[size]\n"
+ "1: nopr %%r7\n"
+ CC_IPM(cc)
+ EX_TABLE_UA_MVCOS_TO(0b, 0b)
+ EX_TABLE_UA_MVCOS_TO(1b, 0b)
+ : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
+ : [spec] "I" (0x81), [from] "Q" (*(const char *)from)
+ : CC_CLOBBER_LIST("memory", "0"));
+ if (__builtin_constant_p(osize) && osize <= 4096)
+ return osize - size;
+ if (likely(CC_TRANSFORM(cc) == 0))
+ return osize - size;
+ size -= 4096;
+ to += 4096;
+ from += 4096;
+ }
}
-static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+unsigned long __must_check
+_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);
+
+static __always_inline unsigned long __must_check
+copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
- return (unsigned long)&x->fixup + x->fixup;
+ if (check_copy_size(to, n, false))
+ n = _copy_from_user_key(to, from, n, key);
+ return n;
}
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
-struct uaccess_ops {
- size_t (*copy_from_user)(size_t, const void __user *, void *);
- size_t (*copy_from_user_small)(size_t, const void __user *, void *);
- size_t (*copy_to_user)(size_t, void __user *, const void *);
- size_t (*copy_to_user_small)(size_t, void __user *, const void *);
- size_t (*copy_in_user)(size_t, void __user *, const void __user *);
- size_t (*clear_user)(size_t, void __user *);
- size_t (*strnlen_user)(size_t, const char __user *);
- size_t (*strncpy_from_user)(size_t, const char __user *, char *);
- int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
- int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
-};
-
-extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_std;
-extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_mvcos_switch;
-extern struct uaccess_ops uaccess_pt;
-
-extern int __handle_fault(unsigned long, unsigned long, int);
-
-static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+unsigned long __must_check
+_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);
+
+static __always_inline unsigned long __must_check
+copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
- size = uaccess.copy_to_user_small(size, ptr, x);
- return size ? -EFAULT : size;
+ if (check_copy_size(from, n, true))
+ n = _copy_to_user_key(to, from, n, key);
+ return n;
}
-static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
-{
- size = uaccess.copy_from_user_small(size, ptr, x);
- return size ? -EFAULT : size;
+int __noreturn __put_user_bad(void);
+
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define DEFINE_PUT_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__put_user_##type##_noinstr(unsigned type __user *to, \
+ unsigned type *from, \
+ unsigned long size) \
+{ \
+ asm goto( \
+ " llilh %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[Efault]) \
+ EX_TABLE(1b, %l[Efault]) \
+ : [to] "+Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0" \
+ : Efault \
+ ); \
+ return 0; \
+Efault: \
+ return -EFAULT; \
}
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- */
-#define __put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __x = (x); \
- int __pu_err = -EFAULT; \
- __chk_user_ptr(ptr); \
- switch (sizeof (*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- case 8: \
- __pu_err = __put_user_fn(sizeof (*(ptr)), \
- ptr, &__x); \
- break; \
- default: \
- __put_user_bad(); \
- break; \
- } \
- __pu_err; \
-})
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+#define DEFINE_PUT_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__put_user_##type##_noinstr(unsigned type __user *to, \
+ unsigned type *from, \
+ unsigned long size) \
+{ \
+ int rc; \
+ \
+ asm_inline volatile( \
+ " llilh %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: lhi %[rc],0\n" \
+ "2:\n" \
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
+ : [rc] "=d" (rc), [to] "+Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0"); \
+ return rc; \
+}
-#define put_user(x, ptr) \
-({ \
- might_fault(); \
- __put_user(x, ptr); \
-})
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+DEFINE_PUT_USER_NOINSTR(char);
+DEFINE_PUT_USER_NOINSTR(short);
+DEFINE_PUT_USER_NOINSTR(int);
+DEFINE_PUT_USER_NOINSTR(long);
+
+#define DEFINE_PUT_USER(type) \
+static __always_inline int \
+__put_user_##type(unsigned type __user *to, unsigned type *from, \
+ unsigned long size) \
+{ \
+ int rc; \
+ \
+ rc = __put_user_##type##_noinstr(to, from, size); \
+ instrument_put_user(*from, to, size); \
+ return rc; \
+}
+DEFINE_PUT_USER(char);
+DEFINE_PUT_USER(short);
+DEFINE_PUT_USER(int);
+DEFINE_PUT_USER(long);
-extern int __put_user_bad(void) __attribute__((noreturn));
-
-#define __get_user(x, ptr) \
-({ \
- int __gu_err = -EFAULT; \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: { \
- unsigned char __x; \
- __gu_err = __get_user_fn(sizeof (*(ptr)), \
- ptr, &__x); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 2: { \
- unsigned short __x; \
- __gu_err = __get_user_fn(sizeof (*(ptr)), \
- ptr, &__x); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 4: { \
- unsigned int __x; \
- __gu_err = __get_user_fn(sizeof (*(ptr)), \
- ptr, &__x); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 8: { \
- unsigned long long __x; \
- __gu_err = __get_user_fn(sizeof (*(ptr)), \
- ptr, &__x); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- default: \
- __get_user_bad(); \
- break; \
- } \
- __gu_err; \
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __prc; \
+ \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __prc = __put_user_char((unsigned char __user *)(ptr), \
+ (unsigned char *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ case 2: \
+ __prc = __put_user_short((unsigned short __user *)(ptr),\
+ (unsigned short *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ case 4: \
+ __prc = __put_user_int((unsigned int __user *)(ptr), \
+ (unsigned int *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ case 8: \
+ __prc = __put_user_long((unsigned long __user *)(ptr), \
+ (unsigned long *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ default: \
+ __prc = __put_user_bad(); \
+ break; \
+ } \
+ __builtin_expect(__prc, 0); \
})
-#define get_user(x, ptr) \
-({ \
- might_fault(); \
- __get_user(x, ptr); \
+#define put_user(x, ptr) \
+({ \
+ might_fault(); \
+ __put_user(x, ptr); \
})
-extern int __get_user_bad(void) __attribute__((noreturn));
-
-#define __put_user_unaligned __put_user
-#define __get_user_unaligned __get_user
-
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_to_user_small(n, to, from);
- else
- return uaccess.copy_to_user(n, to, from);
+int __noreturn __get_user_bad(void);
+
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define DEFINE_GET_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__get_user_##type##_noinstr(unsigned type *to, \
+ const unsigned type __user *from, \
+ unsigned long size) \
+{ \
+ asm goto( \
+ " lhi %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[Efault]) \
+ EX_TABLE(1b, %l[Efault]) \
+ : [to] "=Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0" \
+ : Efault \
+ ); \
+ return 0; \
+Efault: \
+ *to = 0; \
+ return -EFAULT; \
}
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- might_fault();
- return __copy_to_user(to, from, n);
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+#define DEFINE_GET_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__get_user_##type##_noinstr(unsigned type *to, \
+ const unsigned type __user *from, \
+ unsigned long size) \
+{ \
+ int rc; \
+ \
+ asm_inline volatile( \
+ " lhi %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: lhi %[rc],0\n" \
+ "2:\n" \
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
+ : [rc] "=d" (rc), [to] "=Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0"); \
+ if (likely(!rc)) \
+ return 0; \
+ *to = 0; \
+ return rc; \
}
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_from_user_small(n, from, to);
- else
- return uaccess.copy_from_user(n, from, to);
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+DEFINE_GET_USER_NOINSTR(char);
+DEFINE_GET_USER_NOINSTR(short);
+DEFINE_GET_USER_NOINSTR(int);
+DEFINE_GET_USER_NOINSTR(long);
+
+#define DEFINE_GET_USER(type) \
+static __always_inline int \
+__get_user_##type(unsigned type *to, const unsigned type __user *from, \
+ unsigned long size) \
+{ \
+ int rc; \
+ \
+ rc = __get_user_##type##_noinstr(to, from, size); \
+ instrument_get_user(*to); \
+ return rc; \
}
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
+DEFINE_GET_USER(char);
+DEFINE_GET_USER(short);
+DEFINE_GET_USER(int);
+DEFINE_GET_USER(long);
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
+#define __get_user(x, ptr) \
+({ \
+ const __user void *____guptr = (ptr); \
+ int __grc; \
+ \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ const unsigned char __user *__guptr = ____guptr; \
+ unsigned char __x; \
+ \
+ __grc = __get_user_char(&__x, __guptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 2: { \
+ const unsigned short __user *__guptr = ____guptr; \
+ unsigned short __x; \
+ \
+ __grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 4: { \
+ const unsigned int __user *__guptr = ____guptr; \
+ unsigned int __x; \
+ \
+ __grc = __get_user_int(&__x, __guptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 8: { \
+ const unsigned long __user *__guptr = ____guptr; \
+ unsigned long __x; \
+ \
+ __grc = __get_user_long(&__x, __guptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ default: \
+ __grc = __get_user_bad(); \
+ break; \
+ } \
+ __builtin_expect(__grc, 0); \
+})
+
+#define get_user(x, ptr) \
+({ \
+ might_fault(); \
+ __get_user(x, ptr); \
+})
+
+/*
+ * Copy a null terminated string from userspace.
*/
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned int sz = __compiletime_object_size(to);
+long __must_check strncpy_from_user(char *dst, const char __user *src, long count);
- might_fault();
- if (unlikely(sz != -1 && sz < n)) {
- copy_from_user_overflow();
- return n;
- }
- return __copy_from_user(to, from, n);
-}
+long __must_check strnlen_user(const char __user *src, long count);
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
+static uaccess_kmsan_or_inline __must_check unsigned long
+__clear_user(void __user *to, unsigned long size)
{
- return uaccess.copy_in_user(n, to, from);
+ unsigned long osize;
+ int cc;
+
+ while (1) {
+ osize = size;
+ asm_inline volatile(
+ " llilh %%r0,%[spec]\n"
+ "0: mvcos %[to],%[from],%[size]\n"
+ "1: nopr %%r7\n"
+ CC_IPM(cc)
+ EX_TABLE_UA_MVCOS_TO(0b, 0b)
+ EX_TABLE_UA_MVCOS_TO(1b, 0b)
+ : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
+ : [spec] "I" (0x81), [from] "Q" (*(const char *)empty_zero_page)
+ : CC_CLOBBER_LIST("memory", "0"));
+ if (__builtin_constant_p(osize) && osize <= 4096)
+ return osize - size;
+ if (CC_TRANSFORM(cc) == 0)
+ return osize - size;
+ size -= 4096;
+ to += 4096;
+ }
}
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
+static __always_inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
might_fault();
- return __copy_in_user(to, from, n);
+ return __clear_user(to, n);
}
-/*
- * Copy a null terminated string from userspace.
- */
-static inline long __must_check
-strncpy_from_user(char *dst, const char __user *src, long count)
+void *__s390_kernel_write(void *dst, const void *src, size_t size);
+
+static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
{
- might_fault();
- return uaccess.strncpy_from_user(count, src, dst);
+ if (__is_defined(__DECOMPRESSOR))
+ return memcpy(dst, src, size);
+ return __s390_kernel_write(dst, src, size);
}
-static inline unsigned long
-strnlen_user(const char __user * src, unsigned long n)
+void __noreturn __mvc_kernel_nofault_bad(void);
+
+#if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)
+
+#define __mvc_kernel_nofault(dst, src, type, err_label) \
+do { \
+ switch (sizeof(type)) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ asm goto( \
+ "0: mvc %O[_dst](%[_len],%R[_dst]),%[_src]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[err_label]) \
+ EX_TABLE(1b, %l[err_label]) \
+ : [_dst] "=Q" (*(type *)dst) \
+ : [_src] "Q" (*(type *)(src)), \
+ [_len] "I" (sizeof(type)) \
+ : \
+ : err_label); \
+ break; \
+ default: \
+ __mvc_kernel_nofault_bad(); \
+ break; \
+ } \
+} while (0)
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#define __mvc_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type *(__dst) = (type *)(dst); \
+ int __rc; \
+ \
+ switch (sizeof(type)) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ asm_inline volatile( \
+ "0: mvc 0(%[_len],%[_dst]),%[_src]\n" \
+ "1: lhi %[_rc],0\n" \
+ "2:\n" \
+ EX_TABLE_UA_FAULT(0b, 2b, %[_rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[_rc]) \
+ : [_rc] "=d" (__rc), \
+ "=m" (*__dst) \
+ : [_src] "Q" (*(type *)(src)), \
+ [_dst] "a" (__dst), \
+ [_len] "I" (sizeof(type))); \
+ if (__rc) \
+ goto err_label; \
+ break; \
+ default: \
+ __mvc_kernel_nofault_bad(); \
+ break; \
+ } \
+} while (0)
+
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#define arch_get_kernel_nofault __mvc_kernel_nofault
+#define arch_put_kernel_nofault __mvc_kernel_nofault
+
+void __cmpxchg_user_key_called_with_bad_pointer(void);
+
+int __cmpxchg_user_key1(unsigned long address, unsigned char *uval,
+ unsigned char old, unsigned char new, unsigned long key);
+int __cmpxchg_user_key2(unsigned long address, unsigned short *uval,
+ unsigned short old, unsigned short new, unsigned long key);
+int __cmpxchg_user_key4(unsigned long address, unsigned int *uval,
+ unsigned int old, unsigned int new, unsigned long key);
+int __cmpxchg_user_key8(unsigned long address, unsigned long *uval,
+ unsigned long old, unsigned long new, unsigned long key);
+int __cmpxchg_user_key16(unsigned long address, __uint128_t *uval,
+ __uint128_t old, __uint128_t new, unsigned long key);
+
+static __always_inline int _cmpxchg_user_key(unsigned long address, void *uval,
+ __uint128_t old, __uint128_t new,
+ unsigned long key, int size)
{
- might_fault();
- return uaccess.strnlen_user(n, src);
+ switch (size) {
+ case 1: return __cmpxchg_user_key1(address, uval, old, new, key);
+ case 2: return __cmpxchg_user_key2(address, uval, old, new, key);
+ case 4: return __cmpxchg_user_key4(address, uval, old, new, key);
+ case 8: return __cmpxchg_user_key8(address, uval, old, new, key);
+ case 16: return __cmpxchg_user_key16(address, uval, old, new, key);
+ default: __cmpxchg_user_key_called_with_bad_pointer();
+ }
+ return 0;
}
/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
+ * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
+ * @ptr: User space address of value to compare to @old and exchange with
+ * @new. Must be aligned to sizeof(*@ptr).
+ * @uval: Address where the old value of *@ptr is written to.
+ * @old: Old value. Compared to the content pointed to by @ptr in order to
+ * determine if the exchange occurs. The old value read from *@ptr is
+ * written to *@uval.
+ * @new: New value to place at *@ptr.
+ * @key: Access key to use for checking storage key protection.
*
- * Context: User context only. This function may sleep.
+ * Perform a cmpxchg on a user space target, honoring storage key protection.
+ * @key alone determines how key checking is performed, neither
+ * storage-protection-override nor fetch-protection-override apply.
+ * The caller must compare *@uval and @old to determine if values have been
+ * exchanged. In case of an exception *@uval is set to zero.
*
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
+ * Return: 0: cmpxchg executed
+ * -EFAULT: an exception happened when trying to access *@ptr
+ * -EAGAIN: maxed out number of retries (byte and short only)
*/
-#define strlen_user(str) strnlen_user(str, ~0UL)
-
-/*
- * Zero Userspace
- */
-
-static inline unsigned long __must_check
-__clear_user(void __user *to, unsigned long n)
-{
- return uaccess.clear_user(n, to);
-}
-
-static inline unsigned long __must_check
-clear_user(void __user *to, unsigned long n)
-{
- might_fault();
- return uaccess.clear_user(n, to);
-}
-
-extern int copy_to_user_real(void __user *dest, void *src, size_t count);
-extern int copy_from_user_real(void *dest, void __user *src, size_t count);
+#define cmpxchg_user_key(ptr, uval, old, new, key) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(uval) __uval = (uval); \
+ \
+ BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval))); \
+ might_fault(); \
+ __chk_user_ptr(__ptr); \
+ _cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \
+ (old), (new), (key), sizeof(*(__ptr))); \
+})
#endif /* __S390_UACCESS_H */
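
For context, a minimal sketch of how the get_user()/put_user() macros defined above are typically used. Both return 0 on success and -EFAULT if the user-space access faults; the function double_user_value() and its logic are illustrative assumptions, not part of this patch.

static int double_user_value(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;		/* user access faulted */
	val *= 2;
	return put_user(val, uptr);	/* 0 on success, -EFAULT on fault */
}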
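Similarly, a hypothetical caller of cmpxchg_user_key(), following the semantics spelled out in the kernel-doc comment above: the caller must compare *uval with old to learn whether the exchange actually happened. The function name, the -EBUSY mapping and the surrounding logic are illustrative only.

static int update_user_counter(unsigned int __user *uptr, unsigned int expected,
			       unsigned int new_val, unsigned long key)
{
	unsigned int uval;
	int rc;

	rc = cmpxchg_user_key(uptr, &uval, expected, new_val, key);
	if (rc)
		return rc;	/* -EFAULT on exception; -EAGAIN only for 1- and 2-byte targets */
	if (uval != expected)
		return -EBUSY;	/* value changed concurrently, no exchange took place */
	return 0;		/* exchange performed */
}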