Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
| -rw-r--r-- | arch/s390/include/asm/mmu_context.h | 164 |
1 file changed, 98 insertions(+), 66 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 084e7755ed9b..d9b8501bc93d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * S390 version
  *
@@ -8,99 +9,130 @@
 #define __S390_MMU_CONTEXT_H
 
 #include <asm/pgalloc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/mm_types.h>
 #include <asm/tlbflush.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
+#include <asm/asce.h>
+#include <asm-generic/mm_hooks.h>
 
+#define init_new_context init_new_context
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	atomic_set(&mm->context.attach_count, 0);
+	unsigned long asce_type, init_entry;
+
+	spin_lock_init(&mm->context.lock);
+	INIT_LIST_HEAD(&mm->context.gmap_list);
+	cpumask_clear(&mm->context.cpu_attach_mask);
+	atomic_set(&mm->context.flush_count, 0);
+	atomic_set(&mm->context.protected_count, 0);
+	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
-	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-#ifdef CONFIG_64BIT
-	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+#ifdef CONFIG_PGSTE
+	mm->context.has_pgste = 0;
+	mm->context.uses_skeys = 0;
+	mm->context.uses_cmm = 0;
+	mm->context.allow_cow_sharing = 1;
+	mm->context.allow_gmap_hpage_1m = 0;
 #endif
-	if (current->mm && current->mm->context.alloc_pgste) {
+	switch (mm->context.asce_limit) {
+	default:
 		/*
-		 * alloc_pgste indicates, that any NEW context will be created
-		 * with extended page tables. The old context is unchanged. The
-		 * page table allocation and the page table operations will
-		 * look at has_pgste to distinguish normal and extended page
-		 * tables. The only way to create extended page tables is to
-		 * set alloc_pgste and then create a new context (e.g. dup_mm).
-		 * The page table allocation is called after init_new_context
-		 * and if has_pgste is set, it will create extended page
-		 * tables.
+		 * context created by exec, the value of asce_limit can
+		 * only be zero in this case
 		 */
-		mm->context.has_pgste = 1;
-		mm->context.alloc_pgste = 1;
-	} else {
-		mm->context.has_pgste = 0;
-		mm->context.alloc_pgste = 0;
+		VM_BUG_ON(mm->context.asce_limit);
+		/* continue as 3-level task */
+		mm->context.asce_limit = _REGION2_SIZE;
+		fallthrough;
+	case _REGION2_SIZE:
+		/* forked 3-level task */
+		init_entry = _REGION3_ENTRY_EMPTY;
+		asce_type = _ASCE_TYPE_REGION3;
+		break;
+	case TASK_SIZE_MAX:
+		/* forked 5-level task */
+		init_entry = _REGION1_ENTRY_EMPTY;
+		asce_type = _ASCE_TYPE_REGION1;
+		break;
+	case _REGION1_SIZE:
+		/* forked 4-level task */
+		init_entry = _REGION2_ENTRY_EMPTY;
+		asce_type = _ASCE_TYPE_REGION2;
+		break;
 	}
-	mm->context.asce_limit = STACK_TOP_MAX;
-	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			   _ASCE_USER_BITS | asce_type;
+	crst_table_init((unsigned long *) mm->pgd, init_entry);
 	return 0;
 }
 
-#define destroy_context(mm) do { } while (0)
-
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
-
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+				      struct task_struct *tsk)
 {
-	pgd_t *pgd = mm->pgd;
+	int cpu = smp_processor_id();
 
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (s390_user_mode != HOME_SPACE_MODE) {
-		/* Load primary space page table origin. */
-		asm volatile(LCTL_OPCODE" 1,1,%0\n"
-			     : : "m" (S390_lowcore.user_asce) );
-	} else
-		/* Load home space page table origin. */
-		asm volatile(LCTL_OPCODE" 13,13,%0"
-			     : : "m" (S390_lowcore.user_asce) );
-	set_fs(current->thread.mm_segment);
+	if (next == &init_mm)
+		get_lowcore()->user_asce = s390_invalid_asce;
+	else
+		get_lowcore()->user_asce.val = next->context.asce;
+	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+	/* Clear previous user-ASCE from CR1 and CR7 */
+	local_ctl_load(1, &s390_invalid_asce);
+	local_ctl_load(7, &s390_invalid_asce);
+	if (prev != next)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
+#define switch_mm_irqs_off switch_mm_irqs_off
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-	update_mm(next, tsk);
-	atomic_dec(&prev->context.attach_count);
-	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
-	atomic_inc(&next->context.attach_count);
-	/* Check for TLBs not flushed yet */
-	if (next->context.flush_mm)
-		__tlb_flush_mm(next);
-}
-
-#define enter_lazy_tlb(mm,tsk)	do { } while (0)
-#define deactivate_mm(tsk,mm)	do { } while (0)
+	unsigned long flags;
 
-static inline void activate_mm(struct mm_struct *prev,
-			       struct mm_struct *next)
-{
-	switch_mm(prev, next, current);
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+#define finish_arch_post_lock_switch finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
 {
-#ifdef CONFIG_64BIT
-	if (oldmm->context.asce_limit < mm->context.asce_limit)
-		crst_table_downgrade(mm, oldmm->context.asce_limit);
-#endif
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	unsigned long flags;
+
+	if (mm) {
+		preempt_disable();
+		while (atomic_read(&mm->context.flush_count))
+			cpu_relax();
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		__tlb_flush_mm_lazy(mm);
+		preempt_enable();
+	}
+	local_irq_save(flags);
+	if (test_thread_flag(TIF_ASCE_PRIMARY))
+		local_ctl_load(1, &get_lowcore()->kernel_asce);
+	else
+		local_ctl_load(1, &get_lowcore()->user_asce);
+	local_ctl_load(7, &get_lowcore()->user_asce);
+	local_irq_restore(flags);
 }
 
-static inline void arch_exit_mmap(struct mm_struct *mm)
+#define activate_mm activate_mm
+static inline void activate_mm(struct mm_struct *prev,
+			       struct mm_struct *next)
 {
+	switch_mm_irqs_off(prev, next, current);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+	if (test_thread_flag(TIF_ASCE_PRIMARY))
+		local_ctl_load(1, &get_lowcore()->kernel_asce);
+	else
+		local_ctl_load(1, &get_lowcore()->user_asce);
+	local_ctl_load(7, &get_lowcore()->user_asce);
 }
 
+#include <asm-generic/mmu_context.h>
+
 #endif /* __S390_MMU_CONTEXT_H */
