author    Heiko Carstens <hca@linux.ibm.com>    2020-11-16 08:06:40 +0100
committer Heiko Carstens <hca@linux.ibm.com>    2020-11-23 12:01:12 +0100
commit    87d5986345219a7e4f204726d9085ea87f3e22d0 (patch)
tree      7c3cb89736d699e56186dce5f3863c311afb6e57 /arch/s390/lib
parent    77663819d4901e1f982e69ca336daa1bc830c5d6 (diff)
s390/mm: remove set_fs / rework address space handling
Remove set_fs support from s390. Along with this, rework address space
handling and simplify it. As a result, address spaces are now set up
like this:

CPU running in              | %cr1 ASCE | %cr7 ASCE | %cr13 ASCE
----------------------------|-----------|-----------|-----------
user space                  | user      | user      | kernel
kernel, normal execution    | kernel    | user      | kernel
kernel, kvm guest execution | gmap      | user      | kernel

To achieve this the getcpu vdso syscall is removed, in order to avoid
secondary address mode and a separate vdso address space for user space.
The getcpu vdso syscall will be implemented differently with a
subsequent patch.

The kernel now always accesses user space via the secondary address
space. This happens in different ways:

- with mvcos in home space mode, reading from / writing to the
  secondary address space directly
- with mvcs/mvcp in primary space mode, copying from primary space to
  secondary space or vice versa
- with e.g. cs in secondary space mode, accessing the secondary space
  directly

Switching translation modes happens with sacf before and after
instructions which access user space, like before.

Lazy handling of control register reloading is removed in the hope of
making everything simpler, but at the cost of making kernel entry and
exit a bit slower. That is: on kernel entry the primary asce is always
changed to contain the kernel asce, and on kernel exit the primary asce
is changed again so it contains the user asce.

In kernel mode there is only one exception to the primary asce: when
kvm guests are executed, the primary asce contains the gmap asce (which
describes the guest address space). The primary asce is reset to the
kernel asce whenever kvm guest execution is interrupted, so that it
doesn't have to be taken into account for any user space accesses.

Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
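[Editor's note, not part of the commit message: the reg0 constants changed
in the uaccess.c diff below (0x01UL to 0x81UL, 0x010000UL to 0x810000UL,
0x010001UL to 0x810081UL) are MVCOS operand-access-control (OAC) words,
and flipping the 0x80 bit on is exactly what moves the user accesses from
the primary to the secondary address space. Below is a compile-only
sketch of the encoding, based on my reading of the z/Architecture
Principles of Operation; the macro names and the helper are made up for
illustration.]

/*
 * Sketch only, not kernel code: how the MVCOS reg0 values in the diff
 * decompose. General register 0 holds two 16-bit operand-access
 * controls: bits 32-47 for the first (destination) operand, bits 48-63
 * for the second (source) operand. Within each OAC, bit 15 ("A") marks
 * the address-space field as valid, and bits 8-9 select the space
 * (00 = primary, 10 = secondary; PSW-style encoding).
 */
#include <stdio.h>

#define OAC_A			0x0001UL   /* address-space field is valid */
#define OAC_AS_PRIMARY		0x0000UL   /* bits 8-9 = 00: primary space */
#define OAC_AS_SECONDARY	0x0080UL   /* bits 8-9 = 10: secondary space */

static unsigned long mvcos_reg0(unsigned long dst_oac, unsigned long src_oac)
{
	return dst_oac << 16 | src_oac;
}

int main(void)
{
	/* old scheme: the user asce was the primary asce */
	printf("old copy_from_user: %#lx\n",
	       mvcos_reg0(0, OAC_A | OAC_AS_PRIMARY));     /* 0x01 */
	/* new scheme: the user asce is always the secondary asce (%cr7) */
	printf("new copy_from_user: %#lx\n",
	       mvcos_reg0(0, OAC_A | OAC_AS_SECONDARY));   /* 0x81 */
	printf("new copy_to_user:   %#lx\n",
	       mvcos_reg0(OAC_A | OAC_AS_SECONDARY, 0));   /* 0x810000 */
	printf("new copy_in_user:   %#lx\n",
	       mvcos_reg0(OAC_A | OAC_AS_SECONDARY,
			  OAC_A | OAC_AS_SECONDARY));      /* 0x810081 */
	return 0;
}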
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--    arch/s390/lib/uaccess.c    89
1 file changed, 5 insertions(+), 84 deletions(-)
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0267405ab7c6..0ffbe1fad72a 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -40,71 +40,10 @@ static inline int copy_with_mvcos(void)
}
#endif
-void set_fs(mm_segment_t fs)
-{
- current->thread.mm_segment = fs;
- if (fs == USER_DS) {
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- clear_cpu_flag(CIF_ASCE_PRIMARY);
- } else {
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
- }
- if (fs & 1) {
- if (fs == USER_DS_SACF)
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- else
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
- set_cpu_flag(CIF_ASCE_SECONDARY);
- }
-}
-EXPORT_SYMBOL(set_fs);
-
-mm_segment_t enable_sacf_uaccess(void)
-{
- mm_segment_t old_fs;
- unsigned long asce, cr;
- unsigned long flags;
-
- old_fs = current->thread.mm_segment;
- if (old_fs & 1)
- return old_fs;
- /* protect against a concurrent page table upgrade */
- local_irq_save(flags);
- current->thread.mm_segment |= 1;
- asce = S390_lowcore.kernel_asce;
- if (likely(old_fs == USER_DS)) {
- __ctl_store(cr, 1, 1);
- if (cr != S390_lowcore.kernel_asce) {
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
- }
- asce = S390_lowcore.user_asce;
- }
- __ctl_store(cr, 7, 7);
- if (cr != asce) {
- __ctl_load(asce, 7, 7);
- set_cpu_flag(CIF_ASCE_SECONDARY);
- }
- local_irq_restore(flags);
- return old_fs;
-}
-EXPORT_SYMBOL(enable_sacf_uaccess);
-
-void disable_sacf_uaccess(mm_segment_t old_fs)
-{
- current->thread.mm_segment = old_fs;
- if (old_fs == USER_DS && test_facility(27)) {
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- clear_cpu_flag(CIF_ASCE_PRIMARY);
- }
-}
-EXPORT_SYMBOL(disable_sacf_uaccess);
-
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x01UL;
+ register unsigned long reg0 asm("0") = 0x81UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -135,9 +74,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long size)
{
unsigned long tmp1, tmp2;
- mm_segment_t old_fs;
- old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -164,7 +101,6 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -179,7 +115,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x010000UL;
+ register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -210,9 +146,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long size)
{
unsigned long tmp1, tmp2;
- mm_segment_t old_fs;
- old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -239,7 +173,6 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -254,7 +187,7 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x010001UL;
+ register unsigned long reg0 asm("0") = 0x810081UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -277,10 +210,8 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
unsigned long size)
{
- mm_segment_t old_fs;
unsigned long tmp1;
- old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -304,7 +235,6 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -318,7 +248,7 @@ EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x010000UL;
+ register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -346,10 +276,8 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
- mm_segment_t old_fs;
unsigned long tmp1, tmp2;
- old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -378,7 +306,6 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -414,15 +341,9 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
- mm_segment_t old_fs;
- unsigned long len;
-
if (unlikely(!size))
return 0;
- old_fs = enable_sacf_uaccess();
- len = strnlen_user_srst(src, size);
- disable_sacf_uaccess(old_fs);
- return len;
+ return strnlen_user_srst(src, size);
}
EXPORT_SYMBOL(__strnlen_user);
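
[Editor's note, not part of the patch: the sacf instructions left in
place by the hunks above ("sacf 0", "sacf 256") are what the commit
message means by switching translation modes around user accesses. The
mode codes below come from the z/Architecture Principles of Operation;
the bracket helper is a hypothetical, compile-only sketch (s390 gcc) of
the pattern, not code from uaccess.c.]

/*
 * SAC/SACF mode codes; the comments map them to the ASCE table from
 * the commit message.
 */
enum sac_mode {
	SAC_PRIMARY   = 0x000,	/* "sacf 0":   primary-space mode,   %cr1 asce  */
	SAC_SECONDARY = 0x100,	/* "sacf 256": secondary-space mode, %cr7 asce  */
	SAC_AR        = 0x200,	/* "sacf 512": access-register mode             */
	SAC_HOME      = 0x300,	/* "sacf 768": home-space mode,      %cr13 asce */
};

/*
 * Hypothetical sketch of the bracket the uaccess routines use: switch
 * to a mode where data accesses go through the user asce, perform the
 * access, then switch back to home-space mode, the kernel's normal
 * execution mode.
 */
static inline void sketch_sacf_bracket(void)
{
	asm volatile("sacf 256" : : : "memory"); /* data accesses -> user asce (%cr7) */
	/* ... user-space access instructions, e.g. mvc or cs, run here ... */
	asm volatile("sacf 768" : : : "memory"); /* back to home-space mode (%cr13) */
}

[Since instructions are still fetched through the primary asce while in
secondary-space mode, and %cr1 now always holds the kernel (or gmap)
asce, no per-call control register juggling is needed anymore, which is
why enable_sacf_uaccess() and disable_sacf_uaccess() could be deleted
wholesale.]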