Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile           |   1
-rw-r--r--  arch/s390/mm/cmm.c              |   2
-rw-r--r--  arch/s390/mm/dump_pagetables.c  |   1
-rw-r--r--  arch/s390/mm/extmem.c           |   1
-rw-r--r--  arch/s390/mm/fault.c            | 111
-rw-r--r--  arch/s390/mm/gmap.c             |   9
-rw-r--r--  arch/s390/mm/gup.c              |   1
-rw-r--r--  arch/s390/mm/hugetlbpage.c      |   1
-rw-r--r--  arch/s390/mm/init.c             |   6
-rw-r--r--  arch/s390/mm/maccess.c          |   1
-rw-r--r--  arch/s390/mm/mem_detect.c       |   1
-rw-r--r--  arch/s390/mm/page-states.c      |   1
-rw-r--r--  arch/s390/mm/pageattr.c         |   1
-rw-r--r--  arch/s390/mm/pgalloc.c          |  19
-rw-r--r--  arch/s390/mm/vmem.c             |  17
15 files changed, 102 insertions(+), 71 deletions(-)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 0aa0ad165d8b..33fe418506bc 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the linux s390-specific parts of the memory manager.
#
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 829c63dbc81a..2dbdcd85b68f 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -56,7 +56,7 @@ static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
+static DEFINE_TIMER(cmm_timer, NULL);
static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);
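The cmm.c hunk follows the tree-wide timer cleanup that dropped the expires and data arguments from DEFINE_TIMER(); for statically defined timers both were always zero. A minimal sketch of the new form, with a hypothetical callback (at this stage of the conversion the callback type is unchanged and still takes an unsigned long):

	#include <linux/timer.h>

	static void example_timer_fn(unsigned long data);	/* hypothetical */

	/* old form: DEFINE_TIMER(example_timer, example_timer_fn, 0, 0);
	 * new form drops the always-zero expires/data initializers: */
	static DEFINE_TIMER(example_timer, example_timer_fn);

	static void example_timer_fn(unsigned long data)
	{
		/* re-arm one second out, as a periodic timer typically would */
		mod_timer(&example_timer, jiffies + HZ);
	}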
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 049c3c455b32..507f23ba2034 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9f118629b55f..920d40894535 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Carsten Otte <cotte@de.ibm.com>
* Rob M van der Heij <rvdheij@nl.ibm.com>
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index bdabb013537b..93faeca52284 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999
@@ -49,6 +50,13 @@
#define VM_FAULT_SIGNAL 0x080000
#define VM_FAULT_PFAULT 0x100000
+enum fault_type {
+ KERNEL_FAULT,
+ USER_FAULT,
+ VDSO_FAULT,
+ GMAP_FAULT,
+};
+
static unsigned long store_indication __read_mostly;
static int __init fault_init(void)
@@ -98,27 +106,34 @@ void bust_spinlocks(int yes)
}
/*
- * Returns the address space associated with the fault.
- * Returns 0 for kernel space and 1 for user space.
+ * Find out which address space caused the exception.
+ * Access register mode is impossible, ignore space == 3.
*/
-static inline int user_space_fault(struct pt_regs *regs)
+static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
unsigned long trans_exc_code;
- /*
- * The lowest two bits of the translation exception
- * identification indicate which paging table was used.
- */
trans_exc_code = regs->int_parm_long & 3;
- if (trans_exc_code == 3) /* home space -> kernel */
- return 0;
- if (user_mode(regs))
- return 1;
- if (trans_exc_code == 2) /* secondary space -> set_fs */
- return current->thread.mm_segment.ar4;
- if (current->flags & PF_VCPU)
- return 1;
- return 0;
+ if (likely(trans_exc_code == 0)) {
+ /* primary space exception */
+ if (IS_ENABLED(CONFIG_PGSTE) &&
+ test_pt_regs_flag(regs, PIF_GUEST_FAULT))
+ return GMAP_FAULT;
+ if (current->thread.mm_segment == USER_DS)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ if (trans_exc_code == 2) {
+ /* secondary space exception */
+ if (current->thread.mm_segment & 1) {
+ if (current->thread.mm_segment == USER_DS_SACF)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ return VDSO_FAULT;
+ }
+ /* home space exception -> access via kernel ASCE */
+ return KERNEL_FAULT;
}
static int bad_address(void *p)
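The low two bits of the translation-exception identification, which the low-level handler stashes in regs->int_parm_long, name the address space the CPU was translating when it faulted. A sketch of that mapping with a hypothetical helper (the real decision is the switch in get_fault_type() above, which additionally consults mm_segment and the guest-fault flag):

	/* hypothetical helper, for illustration only */
	static const char *fault_space_name(struct pt_regs *regs)
	{
		switch (regs->int_parm_long & 3) {
		case 0:  return "primary";	/* user, kernel, or guest ASCE */
		case 2:  return "secondary";	/* vdso, or kernel via set_fs() */
		case 3:  return "home";		/* kernel ASCE */
		default: return "access-register"; /* cannot occur, see comment */
		}
	}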
@@ -203,20 +218,23 @@ static void dump_fault_info(struct pt_regs *regs)
break;
}
pr_cont("mode while using ");
- if (!user_space_fault(regs)) {
- asce = S390_lowcore.kernel_asce;
- pr_cont("kernel ");
- }
-#ifdef CONFIG_PGSTE
- else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
- struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
- asce = gmap->asce;
- pr_cont("gmap ");
- }
-#endif
- else {
+ switch (get_fault_type(regs)) {
+ case USER_FAULT:
asce = S390_lowcore.user_asce;
pr_cont("user ");
+ break;
+ case VDSO_FAULT:
+ asce = S390_lowcore.vdso_asce;
+ pr_cont("vdso ");
+ break;
+ case GMAP_FAULT:
+ asce = ((struct gmap *) S390_lowcore.gmap)->asce;
+ pr_cont("gmap ");
+ break;
+ case KERNEL_FAULT:
+ asce = S390_lowcore.kernel_asce;
+ pr_cont("kernel ");
+ break;
}
pr_cont("ASCE.\n");
dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
@@ -272,7 +290,7 @@ static noinline void do_no_context(struct pt_regs *regs)
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (!user_space_fault(regs))
+ if (get_fault_type(regs) == KERNEL_FAULT)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" in virtual kernel address space\n");
else
@@ -394,12 +412,11 @@ static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
*/
static inline int do_exception(struct pt_regs *regs, int access)
{
-#ifdef CONFIG_PGSTE
struct gmap *gmap;
-#endif
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
+ enum fault_type type;
unsigned long trans_exc_code;
unsigned long address;
unsigned int flags;
@@ -424,8 +441,19 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
+ type = get_fault_type(regs);
+ switch (type) {
+ case KERNEL_FAULT:
+ goto out;
+ case VDSO_FAULT:
+ fault = VM_FAULT_BADMAP;
goto out;
+ case USER_FAULT:
+ case GMAP_FAULT:
+ if (faulthandler_disabled() || !mm)
+ goto out;
+ break;
+ }
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -436,10 +464,9 @@ static inline int do_exception(struct pt_regs *regs, int access)
flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
-#ifdef CONFIG_PGSTE
- gmap = (current->flags & PF_VCPU) ?
- (struct gmap *) S390_lowcore.gmap : NULL;
- if (gmap) {
+ gmap = NULL;
+ if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
+ gmap = (struct gmap *) S390_lowcore.gmap;
current->thread.gmap_addr = address;
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
current->thread.gmap_int_code = regs->int_code & 0xffff;
@@ -451,7 +478,6 @@ static inline int do_exception(struct pt_regs *regs, int access)
if (gmap->pfault_enabled)
flags |= FAULT_FLAG_RETRY_NOWAIT;
}
-#endif
retry:
fault = VM_FAULT_BADMAP;
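Several hunks in this file replace #ifdef CONFIG_PGSTE blocks with IS_ENABLED(CONFIG_PGSTE) guards. IS_ENABLED() expands to a compile-time 0 or 1, so the gmap code is now parsed and type-checked in every configuration while the compiler still eliminates it from CONFIG_PGSTE=n builds. The pattern, sketched with a hypothetical call:

	/* before: invisible to a CONFIG_PGSTE=n compiler, free to bit-rot */
	#ifdef CONFIG_PGSTE
		if (gmap)
			handle_gmap_fault(gmap);	/* hypothetical */
	#endif

	/* after: always compiled, dead-code-eliminated when the option is off */
		if (IS_ENABLED(CONFIG_PGSTE) && gmap)
			handle_gmap_fault(gmap);	/* hypothetical */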
@@ -506,15 +532,14 @@ retry:
regs, address);
}
if (fault & VM_FAULT_RETRY) {
-#ifdef CONFIG_PGSTE
- if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
+ (flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* FAULT_FLAG_RETRY_NOWAIT has been set,
* mmap_sem has not been released */
current->thread.gmap_pfault = 1;
fault = VM_FAULT_PFAULT;
goto out_up;
}
-#endif
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~(FAULT_FLAG_ALLOW_RETRY |
@@ -524,8 +549,7 @@ retry:
goto retry;
}
}
-#ifdef CONFIG_PGSTE
- if (gmap) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
if (address == -EFAULT) {
@@ -537,7 +561,6 @@ retry:
goto out_up;
}
}
-#endif
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -705,7 +728,7 @@ static void pfault_interrupt(struct ext_code ext_code,
return;
inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
- pid = param64 & LPP_PFAULT_PID_MASK;
+ pid = param64 & LPP_PID_MASK;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 2f66290c9b92..b2c140193b0a 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1187,12 +1187,11 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
unsigned long *sgt)
{
- unsigned long asce, *pgt;
+ unsigned long *pgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
continue;
@@ -1245,12 +1244,11 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
unsigned long *r3t)
{
- unsigned long asce, *sgt;
+ unsigned long *sgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
continue;
@@ -1303,12 +1301,11 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
unsigned long *r2t)
{
- unsigned long asce, *r3t;
+ unsigned long *r3t;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
continue;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 98ffe3ee9411..05c8abd864f1 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Lockless get_user_pages_fast for s390
*
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 44a8e6f0391e..e804090f4470 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM System z Huge TLB Page Support for Kernel.
*
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3b567838b905..671535e64aba 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999
@@ -94,6 +95,7 @@ void __init paging_init(void)
}
init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
S390_lowcore.kernel_asce = init_mm.context.asce;
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
@@ -144,8 +146,8 @@ void __init mem_init(void)
void free_initmem(void)
{
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}
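The __set_memory() fixups here and in vmem.c below share one pattern: the section markers are linker symbols declared as arrays, their difference is a signed ptrdiff_t, and the page count is now cast to unsigned long before the shift. The arithmetic, sketched under the usual declarations from asm/sections.h:

	extern char _sinittext[], _einittext[];	/* linker-provided bounds */

	/* pointer subtraction yields a signed ptrdiff_t; casting first makes
	 * the shift operate on an unsigned value of full register width */
	unsigned long npages =
		(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT;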
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 792f9c63fbca..7be06475809b 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Access kernel memory without faulting -- s390 specific implementation.
*
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index e58dca05b962..21f6c82c8296 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008, 2009
*
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 07fa7b8ae233..382153ff17e3 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008
*
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 552f898dfa74..c44171588d08 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 05f1f27e6708..434a9564917b 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Page table allocation functions
*
@@ -70,10 +71,8 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
- if (current->active_mm == mm) {
- clear_user_asce();
+ if (current->active_mm == mm)
set_user_asce(mm);
- }
__tlb_flush_local();
}
@@ -158,13 +157,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
struct page *page;
- unsigned long *table;
+ u64 *table;
page = alloc_page(GFP_KERNEL);
if (page) {
- table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ table = (u64 *)page_to_phys(page);
+ memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}
return page;
}
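clear_table() was an s390-private helper taking a byte count; memset64() is the generic string helper that fills a count of 64-bit words. The sizes translate directly: with PTRS_PER_PTE = 256 on s390, PAGE_SIZE/2 bytes is exactly PTRS_PER_PTE eight-byte entries and a whole page is 2 * PTRS_PER_PTE. The generic fallback in lib/string.c is essentially this loop (architectures may provide an optimized version):

	/* illustrative re-implementation with memset64() semantics:
	 * fill @count 64-bit words at @s with the pattern @v */
	static u64 *memset64_sketch(u64 *s, u64 v, size_t count)
	{
		size_t i;

		for (i = 0; i < count; i++)
			s[i] = v;
		return s;
	}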
@@ -221,12 +220,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
atomic_set(&page->_mapcount, 3);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} else {
/* Return the first 2K fragment of the page */
atomic_set(&page->_mapcount, 1);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+ memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
spin_lock_bh(&mm->context.lock);
list_add(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.lock);
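For context on the paired memset64() calls: s390 page tables are 2K (PTRS_PER_PTE = 256 entries of 8 bytes), so a 4K page normally carries two independent pte-table fragments, while KVM-capable mms (mm_alloc_pgste()) use the whole page as one pte table plus its PGSTE area. The layout the code tracks through page->_mapcount, sketched:

	/*
	 * One 4K page backing s390 page tables:
	 *
	 *   !mm_alloc_pgste(mm):  [ 2K pte table A | 2K pte table B ]
	 *                         _mapcount bits 0/1 mark the halves in use
	 *
	 *   mm_alloc_pgste(mm):   [ 2K pte table   | 2K PGSTE area  ]
	 *                         _mapcount == 3: both halves used together
	 */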
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index c0af0d7b6e5f..3316d463fc29 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -59,7 +60,7 @@ pte_t __ref *vmem_pte_alloc(void)
pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_INVALID, size);
+ memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
@@ -402,17 +403,17 @@ void __init vmem_map_init(void)
for_each_memblock(memory, reg)
vmem_add_mem(reg->base, reg->size);
- __set_memory((unsigned long) _stext,
- (_etext - _stext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_stext,
+ (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory((unsigned long) _etext,
- (_eshared - _etext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_etext,
+ (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
SET_MEMORY_RO);
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
- (_eshared - _stext) >> 10);
+ (unsigned long)(__end_rodata - _stext) >> 10);
}
}

/*