author     Thomas Bogendoerfer <tsbogend@alpha.franken.de>  2021-04-01 14:56:37 +0200
committer  Thomas Bogendoerfer <tsbogend@alpha.franken.de>  2021-04-06 15:12:58 +0200
commit     04324f44cb69a03fdc8f2ee52386a4fdf6a0043b (patch)
tree       86fb953a6810ec203a39019017349f5b58719cab /arch/mips/include
parent     45deb5faeb9e02951361ceba5ffee721745661c3 (diff)
MIPS: Remove get_fs/set_fs
All get_fs/set_fs calls in MIPS code are gone, so remove the implementation
as well. With the clear separation of user and kernel space access we no
longer need the EVA special handling, so get rid of that, too.

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
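For background, the pattern this series eliminates looked roughly like the
sketch below; callers were converted to dedicated kernel accessors such as
copy_from_kernel_nofault() before this cleanup. The snippet is illustrative
(function and variable names are made up), not code from this tree:

	static int read_kernel_int(const int *kptr, int *val)
	{
		/* Old pattern (removed tree-wide): widen the address limit
		 * so the user accessors would accept kernel pointers:
		 *
		 *	mm_segment_t old_fs = get_fs();
		 *	set_fs(KERNEL_DS);
		 *	err = __get_user(*val, (int __user *)kptr);
		 *	set_fs(old_fs);
		 */

		/* New pattern: a dedicated kernel accessor, no limit games. */
		return copy_from_kernel_nofault(val, kptr, sizeof(*val));
	}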
Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/processor.h    |   4 -
-rw-r--r--  arch/mips/include/asm/thread_info.h  |   6 -
-rw-r--r--  arch/mips/include/asm/uaccess.h      | 449 +-
3 files changed, 146 insertions(+), 313 deletions(-)
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 8e69e0a35ee9..0c3550c82b72 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -221,10 +221,6 @@ struct nlm_cop2_state {
#define COP2_INIT
#endif
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
#ifdef CONFIG_CPU_HAS_MSA
# define ARCH_MIN_TASKALIGN 16
# define FPU_ALIGN __aligned(16)
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index e2c352da3877..0b17aaa9e012 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -28,11 +28,6 @@ struct thread_info {
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
- mm_segment_t addr_limit; /*
- * thread address space limit:
- * 0x7fffffff for user-thread
- * 0xffffffff for kernel-thread
- */
struct pt_regs *regs;
long syscall; /* syscall number */
};
@@ -46,7 +41,6 @@ struct thread_info {
.flags = _TIF_FIXADE, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
}
/*
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index c5cab0b8f902..91bc7fb7dca1 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -16,13 +16,6 @@
#include <asm/asm-eva.h>
#include <asm/extable.h>
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
#ifdef CONFIG_32BIT
#define __UA_LIMIT 0x80000000UL
@@ -50,38 +43,6 @@ extern u64 __ua_limit;
#endif /* CONFIG_64BIT */
/*
- * USER_DS is a bitmask that has the bits set that may not be set in a valid
- * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
- * the arithmetic we're doing only works if the limit is a power of two, so
- * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
- * address in this range it's the process's problem, not ours :-)
- */
-
-#define KERNEL_DS ((mm_segment_t) { 0UL })
-#define USER_DS ((mm_segment_t) { __UA_LIMIT })
-
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-/*
- * eva_kernel_access() - determine whether kernel memory access on an EVA system
- *
- * Determines whether memory accesses should be performed to kernel memory
- * on a system using Extended Virtual Addressing (EVA).
- *
- * Return: true if a kernel memory access on an EVA system, else false.
- */
-static inline bool eva_kernel_access(void)
-{
- if (!IS_ENABLED(CONFIG_EVA))
- return false;
-
- return uaccess_kernel();
-}
-
-/*
* Is an address valid? This does a straightforward calculation rather
* than tests.
*
@@ -118,7 +79,7 @@ static inline bool eva_kernel_access(void)
static inline int __access_ok(const void __user *p, unsigned long size)
{
unsigned long addr = (unsigned long)p;
- return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+ return (__UA_LIMIT & (addr | (addr + size) | __ua_size(size))) == 0;
}
#define access_ok(addr, size) \
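The masking above works because __UA_LIMIT is a power of two: any range that
reaches the limit (or wraps past it) sets a bit inside the mask, and
__ua_size() folds to 0 for positive constant sizes. A host-side sketch using
the 32-bit values from this header (the harness itself is hypothetical):

	#include <assert.h>

	#define UA_LIMIT 0x80000000UL		/* 32-bit __UA_LIMIT */

	/* Mirrors __access_ok() for the constant-size case. */
	static int access_ok_sketch(unsigned long addr, unsigned long size)
	{
		return (UA_LIMIT & (addr | (addr + size))) == 0;
	}

	int main(void)
	{
		assert(access_ok_sketch(0x00400000UL, 4096));	/* user range: ok */
		assert(!access_ok_sketch(0x80000000UL, 4));	/* kernel address */
		assert(!access_ok_sketch(0x7ffffffcUL, 8));	/* crosses the limit */
		return 0;
	}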
@@ -215,43 +176,6 @@ static inline int __access_ok(const void __user *p, unsigned long size)
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
-/*
- * Yuck. We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifndef CONFIG_EVA
-#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_user_asm for EVA.
- */
-#undef _loadd
-#undef _loadw
-#undef _loadh
-#undef _loadb
-#ifdef CONFIG_32BIT
-#define _loadd _loadw
-#else
-#define _loadd(reg, addr) "ld " reg ", " addr
-#endif
-#define _loadw(reg, addr) "lw " reg ", " addr
-#define _loadh(reg, addr) "lh " reg ", " addr
-#define _loadb(reg, addr) "lb " reg ", " addr
-
-#define __get_kernel_common(val, size, ptr) \
-do { \
- switch (size) { \
- case 1: __get_data_asm(val, _loadb, ptr); break; \
- case 2: __get_data_asm(val, _loadh, ptr); break; \
- case 4: __get_data_asm(val, _loadw, ptr); break; \
- case 8: __GET_DW(val, _loadd, ptr); break; \
- default: __get_user_unknown(); break; \
- } \
-} while (0)
-#endif
-
#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
@@ -276,12 +200,9 @@ do { \
({ \
int __gu_err; \
\
- if (eva_kernel_access()) { \
- __get_kernel_common((x), size, ptr); \
- } else { \
- __chk_user_ptr(ptr); \
- __get_user_common((x), size, ptr); \
- } \
+ __chk_user_ptr(ptr); \
+ __get_user_common((x), size, ptr); \
+ \
__gu_err; \
})
@@ -291,11 +212,8 @@ do { \
const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
might_fault(); \
- if (likely(access_ok( __gu_ptr, size))) { \
- if (eva_kernel_access()) \
- __get_kernel_common((x), size, __gu_ptr); \
- else \
- __get_user_common((x), size, __gu_ptr); \
+ if (likely(access_ok(__gu_ptr, size))) { \
+ __get_user_common((x), size, __gu_ptr); \
} else \
(x) = 0; \
\
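With the EVA branch gone, get_user() is again a plain checked user access; a
typical caller looks like this (argp is a hypothetical __user pointer, e.g.
an ioctl argument):

	int val;

	if (get_user(val, (int __user *)argp))
		return -EFAULT;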
@@ -361,46 +279,31 @@ do { \
do { \
int __gu_err; \
\
- __get_kernel_common(*((type *)(dst)), sizeof(type), \
- (__force type *)(src)); \
+ switch (sizeof(type)) { \
+ case 1: \
+ __get_data_asm(*(type *)(dst), kernel_lb, \
+ (__force type *)(src)); \
+ break; \
+ case 2: \
+ __get_data_asm(*(type *)(dst), kernel_lh, \
+ (__force type *)(src)); \
+ break; \
+ case 4: \
+ __get_data_asm(*(type *)(dst), kernel_lw, \
+ (__force type *)(src)); \
+ break; \
+ case 8: \
+ __GET_DW(*(type *)(dst), kernel_ld, \
+ (__force type *)(src)); \
+ break; \
+ default: \
+ __get_user_unknown(); \
+ break; \
+ } \
if (unlikely(__gu_err)) \
goto err_label; \
} while (0)
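The open-coded switch above backs __get_kernel_nofault(), which jumps to the
caller-supplied label on a fault. A sketch of how the generic maccess code
consumes it (modeled on mm/maccess.c; the wrapper name is made up):

	static long read_kernel_word(unsigned long *dst, const unsigned long *src)
	{
		__get_kernel_nofault(dst, src, unsigned long, Efault);
		return 0;
	Efault:
		return -EFAULT;
	}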
-#ifndef CONFIG_EVA
-#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_data_asm for EVA.
- */
-#undef _stored
-#undef _storew
-#undef _storeh
-#undef _storeb
-#ifdef CONFIG_32BIT
-#define _stored _storew
-#else
-#define _stored(reg, addr) "ld " reg ", " addr
-#endif
-
-#define _storew(reg, addr) "sw " reg ", " addr
-#define _storeh(reg, addr) "sh " reg ", " addr
-#define _storeb(reg, addr) "sb " reg ", " addr
-
-#define __put_kernel_common(ptr, size) \
-do { \
- switch (size) { \
- case 1: __put_data_asm(_storeb, ptr); break; \
- case 2: __put_data_asm(_storeh, ptr); break; \
- case 4: __put_data_asm(_storew, ptr); break; \
- case 8: __PUT_DW(_stored, ptr); break; \
- default: __put_user_unknown(); break; \
- } \
-} while(0)
-#endif
-
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
@@ -429,12 +332,9 @@ do { \
int __pu_err = 0; \
\
__pu_val = (x); \
- if (eva_kernel_access()) { \
- __put_kernel_common(ptr, size); \
- } else { \
- __chk_user_ptr(ptr); \
- __put_user_common(ptr, size); \
- } \
+ __chk_user_ptr(ptr); \
+ __put_user_common(ptr, size); \
+ \
__pu_err; \
})
@@ -445,11 +345,8 @@ do { \
int __pu_err = -EFAULT; \
\
might_fault(); \
- if (likely(access_ok( __pu_addr, size))) { \
- if (eva_kernel_access()) \
- __put_kernel_common(__pu_addr, size); \
- else \
- __put_user_common(__pu_addr, size); \
+ if (likely(access_ok(__pu_addr, size))) { \
+ __put_user_common(__pu_addr, size); \
} \
\
__pu_err; \
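And the matching put_user() caller, e.g. returning a status word to user
space (status and argp are hypothetical):

	if (put_user(status, (int __user *)argp))
		return -EFAULT;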
@@ -501,7 +398,23 @@ do { \
int __pu_err = 0; \
\
__pu_val = *(__force type *)(src); \
- __put_kernel_common(((type *)(dst)), sizeof(type)); \
+ switch (sizeof(type)) { \
+ case 1: \
+ __put_data_asm(kernel_sb, (type *)(dst)); \
+ break; \
+ case 2: \
+ __put_data_asm(kernel_sh, (type *)(dst)); \
+ break; \
+ case 4: \
+ __put_data_asm(kernel_sw, (type *)(dst)); \
+ break; \
+ case 8: \
+ __PUT_DW(kernel_sd, (type *)(dst)); \
+ break; \
+ default: \
+ __put_user_unknown(); \
+ break; \
+ } \
if (unlikely(__pu_err)) \
goto err_label; \
} while (0)
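As on the get side, __put_kernel_nofault() takes an error label; a sketch of
the store direction under the same assumptions (wrapper name made up):

	static long write_kernel_word(unsigned long *dst, unsigned long val)
	{
		__put_kernel_nofault(dst, &val, unsigned long, Efault);
		return 0;
	Efault:
		return -EFAULT;
	}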
@@ -529,124 +442,85 @@ do { \
#define DADDI_SCRATCH "$0"
#endif
-extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-
-#define __invoke_copy_from(func, to, from, n) \
-({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- __MODULE_JAL(func) \
- ".set\tnoat\n\t" \
- __UA_ADDU "\t$1, %1, %2\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_to(func, to, from, n) \
-({ \
- register void __user *__cu_to_r __asm__("$4"); \
- register const void *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- __MODULE_JAL(func) \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_from_kernel(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_kernel(to, from, n) \
- __invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_kernel(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#ifndef CONFIG_EVA
-#define __invoke_copy_from_user(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n) \
- __invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#else
-
-/* EVA specific functions */
-
-extern size_t __copy_from_user_eva(void *__to, const void *__from,
- size_t __n);
-extern size_t __copy_to_user_eva(void *__to, const void *__from,
- size_t __n);
-extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
-
-/*
- * Source or destination address is in userland. We need to go through
- * the TLB
- */
-#define __invoke_copy_from_user(to, from, n) \
- __invoke_copy_from(__copy_from_user_eva, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n) \
- __invoke_copy_to(__copy_to_user_eva, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from(__copy_in_user_eva, to, from, n)
-
-#endif /* CONFIG_EVA */
+extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n);
static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (eva_kernel_access())
- return __invoke_copy_to_kernel(to, from, n);
- else
- return __invoke_copy_to_user(to, from, n);
+ register void *__cu_to_r __asm__("$4");
+ register const void __user *__cu_from_r __asm__("$5");
+ register long __cu_len_r __asm__("$6");
+
+ __cu_to_r = to;
+ __cu_from_r = from;
+ __cu_len_r = n;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ __MODULE_JAL(__raw_copy_from_user)
+ ".set\tnoat\n\t"
+ __UA_ADDU "\t$1, %1, %2\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+ :
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+ DADDI_SCRATCH, "memory");
+
+ return __cu_len_r;
}
static inline unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (eva_kernel_access())
- return __invoke_copy_from_kernel(to, from, n);
- else
- return __invoke_copy_from_user(to, from, n);
+ register void __user *__cu_to_r __asm__("$4");
+ register const void *__cu_from_r __asm__("$5");
+ register long __cu_len_r __asm__("$6");
+
+ __cu_to_r = (to);
+ __cu_from_r = (from);
+ __cu_len_r = (n);
+
+ __asm__ __volatile__(
+ __MODULE_JAL(__raw_copy_to_user)
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+ :
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+ DADDI_SCRATCH, "memory");
+
+ return __cu_len_r;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
static inline unsigned long
-raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- if (eva_kernel_access())
- return ___invoke_copy_in_kernel(to, from, n);
- else
- return ___invoke_copy_in_user(to, from, n);
+ register void __user *__cu_to_r __asm__("$4");
+ register const void __user *__cu_from_r __asm__("$5");
+ register long __cu_len_r __asm__("$6");
+
+ __cu_to_r = to;
+ __cu_from_r = from;
+ __cu_len_r = n;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ __MODULE_JAL(__raw_copy_in_user)
+ ".set\tnoat\n\t"
+ __UA_ADDU "\t$1, %1, %2\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+ :
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+ DADDI_SCRATCH, "memory");
+ return __cu_len_r;
}
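The INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER defines above opt into the
generic inline wrappers, which add the access_ok() check and zero any
uncopied tail. A simplified sketch of the from-user side (hardening checks
omitted; modeled on include/linux/uaccess.h):

	static inline unsigned long
	copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		might_fault();
		if (likely(access_ok(from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* don't leak stale data */
		return res;
	}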
-extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
/*
@@ -672,28 +546,16 @@ __clear_user(void __user *addr, __kernel_size_t size)
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, $0\n\t"
- "move\t$6, %2\n\t"
- __MODULE_JAL(__bzero_kernel)
- "move\t%0, $6"
- : "=r" (res)
- : "r" (addr), "r" (size)
- : bzero_clobbers);
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, $0\n\t"
- "move\t$6, %2\n\t"
- __MODULE_JAL(__bzero)
- "move\t%0, $6"
- : "=r" (res)
- : "r" (addr), "r" (size)
- : bzero_clobbers);
- }
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, $0\n\t"
+ "move\t$6, %2\n\t"
+ __MODULE_JAL(__bzero)
+ "move\t%0, $6"
+ : "=r" (res)
+ : "r" (addr), "r" (size)
+ : bzero_clobbers);
return res;
}
@@ -707,7 +569,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
__cl_size; \
})
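__clear_user() and clear_user() return the number of bytes left unzeroed, so
a caller treats any non-zero result as a fault (ubuf/len hypothetical):

	if (clear_user(ubuf, len))
		return -EFAULT;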
-extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
/*
@@ -733,33 +594,23 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_kernel_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
- }
+ if (!access_ok(__from, __len))
+ return -EFAULT;
+
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
return res;
}
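strncpy_from_user() returns the copied length excluding the NUL, the full
count on truncation, or -EFAULT; a typical caller (buffer names hypothetical):

	char name[32];
	long len = strncpy_from_user(name, uname, sizeof(name));

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == sizeof(name))
		return -ENAMETOOLONG;	/* no NUL within the buffer */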
-extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);
/*
@@ -779,26 +630,18 @@ static inline long strnlen_user(const char __user *s, long n)
{
long res;
+ if (!access_ok(s, n))
+ return 0;
+
might_fault();
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_kernel_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
- } else {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
- }
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
return res;
}
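strnlen_user() returns 0 for an inaccessible string and otherwise the length
including the terminating NUL, so a caller can do (ustr hypothetical):

	long len = strnlen_user(ustr, PATH_MAX);

	if (!len)
		return -EFAULT;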