author      Linus Torvalds <torvalds@linux-foundation.org>   2024-07-18 15:41:45 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>   2024-07-18 15:41:45 -0700
commit      1c7d0c3af5cc8adafef6477f9416820fc894ca40 (patch)
tree        449450c8ca1726cefb8197256b61c4de0b7cfddb /arch/s390/mm
parent      dde1a0e1625c08cf4f958348a83434b2ddecf449 (diff)
parent      df39038cd89525d465c2c8827eb64116873f141a (diff)

Merge tag 's390-6.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:
- Remove restrictions on PAI NNPA and crypto counters, enabling
concurrent per-task and system-wide sampling and counting events
- Switch to GENERIC_CPU_DEVICES by setting up the CPU present mask in
the architecture code and letting the generic code handle CPU
bring-up
- Add support for the diag204 busy indication facility to prevent
undesirable blocking during hypervisor logical CPU utilization
queries. Implement results caching
- Improve the handling of Store Data SCLP events by suppressing an
unnecessary warning, preventing the buffer from being released while
it is still in use for I/O after failures, and adding timeout handling
for Store Data requests to address potential firmware issues
- Provide optimized __arch_hweight*() implementations (see the first
sketch after this list)
- Remove the unnecessary CPU KOBJ_CHANGE uevents generated during
topology updates, as they are unused and also not present on other
architectures
- Clean up atomic_ops: optimize __atomic_set() for small values and
__atomic_cmpxchg_bool() for compilers that support the flag output
constraint (see the sketch after this list)
- A couple of cleanups for KVM:
- Move and improve KVM struct definitions for DAT tables from
gaccess.c to a new header
- Pass the asce as parameter to sie64a()
- Make the crdte() and cspg() page table handling wrappers return a
boolean to indicate success, like the other existing "compare and
swap" wrappers
- Add documentation for HWCAP flags
- Switch to obtaining total RAM pages from memblock instead of
totalram_pages() during mm init, to ensure correct calculation of the
zero page size when defer_init is enabled
- Refactor lowcore access and switch to using the get_lowcore()
function instead of the S390_lowcore macro (see the sketch after this
list)
- Cleanups for PG_arch_1 and folio handling in UV and hugetlb code
- Add missing MODULE_DESCRIPTION() macros
- Fix VM_FAULT_HWPOISON handling in do_exception()
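
As a quick illustration of the hweight item above: a minimal sketch that uses a generic compiler population-count builtin as a stand-in for the dedicated instruction the optimized s390 helpers emit. The function name is hypothetical and this is not the kernel's actual implementation.

static inline unsigned int arch_hweight64_sketch(unsigned long w)
{
	/* Population count of a 64-bit value; the real helpers use the
	 * machine's population-count instruction when available. */
	return __builtin_popcountl(w);
}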
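
For the atomic_ops item: a minimal sketch of a compare-and-swap wrapper that reads the condition code through the compiler's flag output constraint when __HAVE_ASM_FLAG_OUTPUTS__ is defined, instead of extracting it with extra instructions. The function name, operand constraints, and fallback path are illustrative assumptions, not the kernel's exact code.

static inline bool atomic_cmpxchg_bool_sketch(int *ptr, int old, int new)
{
#ifdef __HAVE_ASM_FLAG_OUTPUTS__
	int cc;

	/* COMPARE AND SWAP sets cc 0 when the compare matched and the
	 * swap was performed; the flag output returns cc directly. */
	asm volatile(
		"	cs	%[old],%[new],%[ptr]\n"
		: [old] "+d" (old), [ptr] "+Q" (*ptr), "=@cc" (cc)
		: [new] "d" (new)
		: "memory");
	return cc == 0;
#else
	int expected = old;

	/* Without flag outputs, infer success from the returned old value. */
	asm volatile(
		"	cs	%[old],%[new],%[ptr]\n"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old == expected;
#endif
}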
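
And for the lowcore item: a minimal sketch of the accessor direction, assuming (as the old S390_lowcore macro did) that the per-CPU lowcore is reached through absolute address 0. The sketch name is hypothetical; the kernel's real get_lowcore() may be defined differently. The diff below shows call sites such as S390_lowcore.kernel_asce.val being converted to get_lowcore()->kernel_asce.val.

struct lowcore;	/* full layout lives in the architecture headers */

static inline struct lowcore *get_lowcore_sketch(void)
{
	/* The lowcore is addressed at 0 via the CPU's prefix register. */
	return (struct lowcore *)0UL;
}
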
* tag 's390-6.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (54 commits)
s390/mm: Fix VM_FAULT_HWPOISON handling in do_exception()
s390/kvm: Move bitfields for dat tables
s390/entry: Pass the asce as parameter to sie64a()
s390/sthyi: Use cached data when diag is busy
s390/sthyi: Move diag operations
s390/hypfs_diag: Diag204 busy loop
s390/diag: Add busy-indication-facility requirements
s390/diag: Diag204 add busy return errno
s390/diag: Return errno's from diag204
s390/sclp: Diag204 busy indication facility detection
s390/atomic_ops: Make use of flag output constraint
s390/atomic_ops: Improve __atomic_set() for small values
s390/atomic_ops: Use symbolic names
s390/smp: Switch to GENERIC_CPU_DEVICES
s390/hwcaps: Add documentation for HWCAP flags
s390/pgtable: Make crdte() and cspg() return a value
s390/topology: Remove CPU KOBJ_CHANGE uevents
s390/sclp: Add timeout to Store Data requests
s390/sclp: Prevent release of buffer in I/O
s390/sclp: Suppress unnecessary Store Data warning
...
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/cmm.c              |  1
-rw-r--r--  arch/s390/mm/dump_pagetables.c  |  2
-rw-r--r--  arch/s390/mm/fault.c            | 33
-rw-r--r--  arch/s390/mm/gmap.c             | 16
-rw-r--r--  arch/s390/mm/hugetlbpage.c      |  8
-rw-r--r--  arch/s390/mm/init.c             |  3
-rw-r--r--  arch/s390/mm/pageattr.c         |  2
-rw-r--r--  arch/s390/mm/pgalloc.c          |  4
8 files changed, 38 insertions, 31 deletions
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index f8b13f247646..5cb5e724cde3 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -427,4 +427,5 @@ static void __exit cmm_exit(void)
 }
 module_exit(cmm_exit);
 
+MODULE_DESCRIPTION("Cooperative memory management interface");
 MODULE_LICENSE("GPL");
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index ffd07ed7b4af..45db5f47b22d 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -288,7 +288,7 @@ static int pt_dump_init(void)
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
-	max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
+	max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
	address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 65747f15dbec..8e149ef5e89b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -74,7 +74,7 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
		return USER_FAULT;
	if (!IS_ENABLED(CONFIG_PGSTE))
		return KERNEL_FAULT;
-	gmap = (struct gmap *)S390_lowcore.gmap;
+	gmap = (struct gmap *)get_lowcore()->gmap;
	if (gmap && gmap->asce == regs->cr1)
		return GMAP_FAULT;
	return KERNEL_FAULT;
@@ -182,15 +182,15 @@ static void dump_fault_info(struct pt_regs *regs)
		pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
-		asce = S390_lowcore.user_asce.val;
+		asce = get_lowcore()->user_asce.val;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
-		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
+		asce = ((struct gmap *)get_lowcore()->gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
-		asce = S390_lowcore.kernel_asce.val;
+		asce = get_lowcore()->kernel_asce.val;
		pr_cont("kernel ");
		break;
	default:
@@ -351,7 +351,7 @@ lock_mmap:
	mmap_read_lock(mm);
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
-		gmap = (struct gmap *)S390_lowcore.gmap;
+		gmap = (struct gmap *)get_lowcore()->gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
@@ -433,12 +433,13 @@ error:
			handle_fault_error_nolock(regs, 0);
		else
			do_sigsegv(regs, SEGV_MAPERR);
-	} else if (fault & VM_FAULT_SIGBUS) {
+	} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigbus(regs);
	} else {
+		pr_emerg("Unexpected fault flags: %08x\n", fault);
		BUG();
	}
 }
@@ -492,6 +493,7 @@ void do_secure_storage_access(struct pt_regs *regs)
	unsigned long addr = get_fault_address(regs);
	struct vm_area_struct *vma;
	struct mm_struct *mm;
+	struct folio *folio;
	struct page *page;
	struct gmap *gmap;
	int rc;
@@ -521,7 +523,7 @@ void do_secure_storage_access(struct pt_regs *regs)
	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
-		gmap = (struct gmap *)S390_lowcore.gmap;
+		gmap = (struct gmap *)get_lowcore()->gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
@@ -539,17 +541,18 @@ void do_secure_storage_access(struct pt_regs *regs)
			mmap_read_unlock(mm);
			break;
		}
-		if (arch_make_page_accessible(page))
+		folio = page_folio(page);
+		if (arch_make_folio_accessible(folio))
			send_sig(SIGSEGV, current, 0);
-		put_page(page);
+		folio_put(folio);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
-		page = phys_to_page(addr);
-		if (unlikely(!try_get_page(page)))
+		folio = phys_to_folio(addr);
+		if (unlikely(!folio_try_get(folio)))
			break;
-		rc = arch_make_page_accessible(page);
-		put_page(page);
+		rc = arch_make_folio_accessible(folio);
+		folio_put(folio);
		if (rc)
			BUG();
		break;
@@ -561,7 +564,7 @@ NOKPROBE_SYMBOL(do_secure_storage_access);
 
 void do_non_secure_storage_access(struct pt_regs *regs)
 {
-	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+	struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
	unsigned long gaddr = get_fault_address(regs);
 
	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
@@ -573,7 +576,7 @@ NOKPROBE_SYMBOL(do_non_secure_storage_access);
 
 void do_secure_storage_violation(struct pt_regs *regs)
 {
-	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+	struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
	unsigned long gaddr = get_fault_address(regs);
 
	/*
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 474a25ca5c48..eb0b51a36be0 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(gmap_remove);
  */
 void gmap_enable(struct gmap *gmap)
 {
-	S390_lowcore.gmap = (unsigned long) gmap;
+	get_lowcore()->gmap = (unsigned long)gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_enable);
 
@@ -297,7 +297,7 @@ EXPORT_SYMBOL_GPL(gmap_enable);
  */
 void gmap_disable(struct gmap *gmap)
 {
-	S390_lowcore.gmap = 0UL;
+	get_lowcore()->gmap = 0UL;
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
 
@@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(gmap_disable);
  */
 struct gmap *gmap_get_enabled(void)
 {
-	return (struct gmap *) S390_lowcore.gmap;
+	return (struct gmap *)get_lowcore()->gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_get_enabled);
 
@@ -2733,7 +2733,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 {
	pmd_t *pmd = (pmd_t *)pte;
	unsigned long start, end;
-	struct page *page = pmd_page(*pmd);
+	struct folio *folio = page_folio(pmd_page(*pmd));
 
	/*
	 * The write check makes sure we do not set a key on shared
@@ -2748,7 +2748,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
	start = pmd_val(*pmd) & HPAGE_MASK;
	end = start + HPAGE_SIZE;
	__storage_key_init_range(start, end);
-	set_bit(PG_arch_1, &page->flags);
+	set_bit(PG_arch_1, &folio->flags);
	cond_resched();
	return 0;
 }
@@ -2841,13 +2841,15 @@ static const struct mm_walk_ops gather_pages_ops = {
  */
 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
 {
+	struct folio *folio;
	unsigned long i;
 
	for (i = 0; i < count; i++) {
+		folio = pfn_folio(pfns[i]);
		/* we always have an extra reference */
-		uv_destroy_owned_page(pfn_to_phys(pfns[i]));
+		uv_destroy_folio(folio);
		/* get rid of the extra reference */
-		put_page(pfn_to_page(pfns[i]));
+		folio_put(folio);
		cond_resched();
	}
 }
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 2675aab4acc7..34d558164f0d 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -121,7 +121,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
 
 static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
 {
-	struct page *page;
+	struct folio *folio;
	unsigned long size, paddr;
 
	if (!mm_uses_skeys(mm) ||
@@ -129,16 +129,16 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
		return;
 
	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
-		page = pud_page(__pud(rste));
+		folio = page_folio(pud_page(__pud(rste)));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
-		page = pmd_page(__pmd(rste));
+		folio = page_folio(pmd_page(__pmd(rste)));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}
 
-	if (!test_and_set_bit(PG_arch_1, &page->flags))
+	if (!test_and_set_bit(PG_arch_1, &folio->flags))
		__storage_key_init_range(paddr, paddr + size);
 }
 
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e769d2726f4e..ddcd39ef4346 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -62,6 +62,7 @@ EXPORT_SYMBOL(zero_page_mask);
 
 static void __init setup_zero_pages(void)
 {
+	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
	unsigned int order;
	struct page *page;
	int i;
@@ -70,7 +71,7 @@ static void __init setup_zero_pages(void)
		order = 7;
 
	/* Limit number of empty zero pages for small memory sizes */
-	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
+	while (order > 2 && (total_pages >> 10) < (1UL << order))
		order--;
 
	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 01bc8fad64d6..5f805ad42d4c 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -75,7 +75,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
			break;
		}
		table = (unsigned long *)((unsigned long)old & mask);
-		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
+		crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 7e3e767ab87d..f691e0fb66a2 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -66,8 +66,8 @@ static void __crst_table_upgrade(void *arg)
 
	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
-		S390_lowcore.user_asce.val = mm->context.asce;
-		local_ctl_load(7, &S390_lowcore.user_asce);
+		get_lowcore()->user_asce.val = mm->context.asce;
+		local_ctl_load(7, &get_lowcore()->user_asce);
	}
	__tlb_flush_local();
 }