author     Linus Torvalds <torvalds@linux-foundation.org>  2025-01-26 18:36:23 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-01-26 18:36:23 -0800
commit     9c5968db9e625019a0ee5226c7eebef5519d366a (patch)
tree       231c54fb0cbd182f9ce609eefd6d2d551c71ecad /arch/m68k
parent     c159dfbdd4fc62fa08f6715d9d6c34d39cf40446 (diff)
parent     d1366e74342e75555af2648a2964deb2d5c92200 (diff)
Merge tag 'mm-stable-2025-01-26-14-59' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
 "The various patchsets are summarized below. Plus of course many individual patches which are described in their changelogs.

  - "Allocate and free frozen pages" from Matthew Wilcox reorganizes the page allocator so we end up with the ability to allocate and free zero-refcount pages. So that callers (ie, slab) can avoid a refcount inc & dec

  - "Support large folios for tmpfs" from Baolin Wang teaches tmpfs to use large folios other than PMD-sized ones

  - "Fix mm/rodata_test" from Petr Tesarik performs some maintenance and fixes for this small built-in kernel selftest

  - "mas_anode_descend() related cleanup" from Wei Yang tidies up part of the mapletree code

  - "mm: fix format issues and param types" from Keren Sun implements a few minor code cleanups

  - "simplify split calculation" from Wei Yang provides a few fixes and a test for the mapletree code

  - "mm/vma: make more mmap logic userland testable" from Lorenzo Stoakes continues the work of moving vma-related code into the (relatively) new mm/vma.c

  - "mm/page_alloc: gfp flags cleanups for alloc_contig_*()" from David Hildenbrand cleans up and rationalizes handling of gfp flags in the page allocator

  - "readahead: Reintroduce fix for improper RA window sizing" from Jan Kara is a second attempt at fixing a readahead window sizing issue. It should reduce the amount of unnecessary reading

  - "synchronously scan and reclaim empty user PTE pages" from Qi Zheng addresses an issue where "huge" amounts of pte pagetables are accumulated: https://lore.kernel.org/lkml/cover.1718267194.git.zhengqi.arch@bytedance.com/ Qi's series addresses this windup by synchronously freeing PTE memory within the context of madvise(MADV_DONTNEED)

  - "selftest/mm: Remove warnings found by adding compiler flags" from Muhammad Usama Anjum fixes some build warnings in the selftests code when optional compiler warnings are enabled

  - "mm: don't use __GFP_HARDWALL when migrating remote pages" from David Hildenbrand tightens the allocator's observance of __GFP_HARDWALL

  - "pkeys kselftests improvements" from Kevin Brodsky implements various fixes and cleanups in the MM selftests code, mainly pertaining to the pkeys tests

  - "mm/damon: add sample modules" from SeongJae Park enhances DAMON to estimate application working set size

  - "memcg/hugetlb: Rework memcg hugetlb charging" from Joshua Hahn provides some cleanups to memcg's hugetlb charging logic

  - "mm/swap_cgroup: remove global swap cgroup lock" from Kairui Song removes the global swap cgroup lock. A speedup of 10% for a tmpfs-based kernel build was demonstrated

  - "zram: split page type read/write handling" from Sergey Senozhatsky has several fixes and cleanups for zram in the area of zram_write_page(). A watchdog softlockup warning was eliminated

  - "move pagetable_*_dtor() to __tlb_remove_table()" from Kevin Brodsky cleans up the pagetable destructor implementations. A rare use-after-free race is fixed (the arch/m68k conversion below follows this series; see the sketch after this summary)

  - "mm/debug: introduce and use VM_WARN_ON_VMG()" from Lorenzo Stoakes simplifies and cleans up the debugging code in the VMA merging logic

  - "Account page tables at all levels" from Kevin Brodsky cleans up and regularizes the pagetable ctor/dtor handling. This results in improvements in accounting accuracy

  - "mm/damon: replace most damon_callback usages in sysfs with new core functions" from SeongJae Park cleans up and generalizes DAMON's sysfs file interface logic

  - "mm/damon: enable page level properties based monitoring" from SeongJae Park increases the amount of information which is presented in response to DAMOS actions

  - "mm/damon: remove DAMON debugfs interface" from SeongJae Park removes DAMON's long-deprecated debugfs interfaces. Thus the migration to sysfs is completed

  - "mm/hugetlb: Refactor hugetlb allocation resv accounting" from Peter Xu cleans up and generalizes the hugetlb reservation accounting

  - "mm: alloc_pages_bulk: small API refactor" from Luiz Capitulino removes a never-used feature of the alloc_pages_bulk() interface

  - "mm/damon: extend DAMOS filters for inclusion" from SeongJae Park extends DAMOS filters to support not only exclusion (rejecting), but also inclusion (allowing) behavior

  - "Add zpdesc memory descriptor for zswap.zpool" from Alex Shi introduces a new memory descriptor for zswap.zpool that currently overlaps with struct page for now. This is part of the effort to reduce the size of struct page and to enable dynamic allocation of memory descriptors

  - "mm, swap: rework of swap allocator locks" from Kairui Song redoes and simplifies the swap allocator locking. A speedup of 400% was demonstrated for one workload. As was a 35% reduction for kernel build time with swap-on-zram

  - "mm: update mips to use do_mmap(), make mmap_region() internal" from Lorenzo Stoakes reworks MIPS's use of mmap_region() so that mmap_region() can be made MM-internal

  - "mm/mglru: performance optimizations" from Yu Zhao fixes a few MGLRU regressions and otherwise improves MGLRU performance

  - "Docs/mm/damon: add tuning guide and misc updates" from SeongJae Park updates DAMON documentation

  - "Cleanup for memfd_create()" from Isaac Manjarres does that thing

  - "mm: hugetlb+THP folio and migration cleanups" from David Hildenbrand provides various cleanups in the areas of hugetlb folios, THP folios and migration

  - "Uncached buffered IO" from Jens Axboe implements the new RWF_DONTCACHE flag which provides synchronous dropbehind for pagecache reading and writing. To permit userspace to address issues with massive buildup of useless pagecache when reading/writing fast devices

  - "selftests/mm: virtual_address_range: Reduce memory" from Thomas Weißschuh fixes and optimizes some of the MM selftests"

* tag 'mm-stable-2025-01-26-14-59' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (321 commits)
  mm/compaction: fix UBSAN shift-out-of-bounds warning
  s390/mm: add missing ctor/dtor on page table upgrade
  kasan: sw_tags: use str_on_off() helper in kasan_init_sw_tags()
  tools: add VM_WARN_ON_VMG definition
  mm/damon/core: use str_high_low() helper in damos_wmark_wait_us()
  seqlock: add missing parameter documentation for raw_seqcount_try_begin()
  mm/page-writeback: consolidate wb_thresh bumping logic into __wb_calc_thresh
  mm/page_alloc: remove the incorrect and misleading comment
  zram: remove zcomp_stream_put() from write_incompressible_page()
  mm: separate move/undo parts from migrate_pages_batch()
  mm/kfence: use str_write_read() helper in get_access_type()
  selftests/mm/mkdirty: fix memory leak in test_uffdio_copy()
  kasan: hw_tags: Use str_on_off() helper in kasan_init_hw_tags()
  selftests/mm: virtual_address_range: avoid reading from VM_IO mappings
  selftests/mm: vm_util: split up /proc/self/smaps parsing
  selftests/mm: virtual_address_range: unmap chunks after validation
  selftests/mm: virtual_address_range: mmap() without PROT_WRITE
  selftests/memfd/memfd_test: fix possible NULL pointer dereference
  mm: add FGP_DONTCACHE folio creation flag
  mm: call filemap_fdatawrite_range_kick() after IOCB_DONTCACHE issue
  ...
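The arch/m68k portion of this merge is driven by the "move pagetable_*_dtor() to __tlb_remove_table()" and "Account page tables at all levels" series: level-specific destructors such as pagetable_pte_dtor() collapse into a single pagetable_dtor(), and each allocation path gains the matching per-level constructor. A minimal sketch of the resulting free-side pattern, using a hypothetical example_pte_free() helper rather than code from this merge:

        static inline void example_pte_free(pgtable_t pgtable)
        {
                struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

                pagetable_dtor(ptdesc);         /* was pagetable_pte_dtor() */
                pagetable_free(ptdesc);         /* pagetable_dtor_free() combines both steps */
        }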
Diffstat (limited to 'arch/m68k')
-rw-r--r--  arch/m68k/include/asm/mcf_pgalloc.h        7
-rw-r--r--  arch/m68k/include/asm/motorola_pgalloc.h   6
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h       4
-rw-r--r--  arch/m68k/mm/init.c                        5
-rw-r--r--  arch/m68k/mm/mcfmmu.c                     10
-rw-r--r--  arch/m68k/mm/motorola.c                   26
-rw-r--r--  arch/m68k/mm/sun3mmu.c                    10
-rw-r--r--  arch/m68k/sun3/sun3dvma.c                  6
8 files changed, 31 insertions, 43 deletions
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 302c5bf67179..4c648b51e7fd 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -37,7 +37,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
@@ -61,7 +61,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
@@ -73,7 +73,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pagetable_free(virt_to_ptdesc(pgd));
+ pagetable_dtor_free(virt_to_ptdesc(pgd));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -84,6 +84,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (!ptdesc)
return NULL;
+ pagetable_pgd_ctor(ptdesc);
new_pgd = ptdesc_address(ptdesc);
memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 74a817d9387f..5abe7da8ac5a 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -9,9 +9,9 @@ extern void mmu_page_ctor(void *page);
extern void mmu_page_dtor(void *page);
enum m68k_table_types {
- TABLE_PGD = 0,
- TABLE_PMD = 0, /* same size as PGD */
- TABLE_PTE = 1,
+ TABLE_PGD,
+ TABLE_PMD,
+ TABLE_PTE,
};
extern void init_pointer_table(void *table, int type);
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 4a137eecb6fe..f1ae4ed890db 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -19,7 +19,7 @@ extern const char bad_pmd_string[];
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
@@ -43,7 +43,7 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd;
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ new_pgd = __pgd_alloc(mm, 0);
memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
return new_pgd;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 1b47bec15832..8b11d0d545aa 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -68,10 +68,7 @@ void __init paging_init(void)
high_memory = (void *) end_mem;
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
free_area_init(max_zone_pfn);
}
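The hunk above is an instance of the memblock_alloc_or_panic() conversion repeated throughout the rest of this diff: the open-coded allocation-failure panic moves into the helper, which panics internally if the allocation fails. A condensed before/after sketch (illustrative only; ptr stands in for the target pointer):

        /* before */
        ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!ptr)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        /* after: the helper panics on failure, so no caller-side check */
        ptr = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);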
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 9a6fa342e872..19a75029036c 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -42,20 +42,14 @@ void __init paging_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
int i;
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pg_dir = swapper_pg_dir;
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
- if (!next_pgtable)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, PAGE_SIZE);
+ next_pgtable = (unsigned long) memblock_alloc_or_panic(size, PAGE_SIZE);
pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index c1761d309fc6..73651e093c4d 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -97,17 +97,19 @@ void mmu_page_dtor(void *page)
typedef struct list_head ptable_desc;
-static struct list_head ptable_list[2] = {
+static struct list_head ptable_list[3] = {
LIST_HEAD_INIT(ptable_list[0]),
LIST_HEAD_INIT(ptable_list[1]),
+ LIST_HEAD_INIT(ptable_list[2]),
};
#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
-static const int ptable_shift[2] = {
- 7+2, /* PGD, PMD */
+static const int ptable_shift[3] = {
+ 7+2, /* PGD */
+ 7+2, /* PMD */
6+2, /* PTE */
};
@@ -156,12 +158,20 @@ void *get_pointer_table(int type)
if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
return NULL;
- if (type == TABLE_PTE) {
+ switch (type) {
+ case TABLE_PTE:
/*
* m68k doesn't have SPLIT_PTE_PTLOCKS for not having
* SMP.
*/
pagetable_pte_ctor(virt_to_ptdesc(page));
+ break;
+ case TABLE_PMD:
+ pagetable_pmd_ctor(virt_to_ptdesc(page));
+ break;
+ case TABLE_PGD:
+ pagetable_pgd_ctor(virt_to_ptdesc(page));
+ break;
}
mmu_page_ctor(page);
@@ -200,8 +210,7 @@ int free_pointer_table(void *table, int type)
/* all tables in page are free, free page */
list_del(dp);
mmu_page_dtor((void *)page);
- if (type == TABLE_PTE)
- pagetable_pte_dtor(virt_to_ptdesc((void *)page));
+ pagetable_dtor(virt_to_ptdesc((void *)page));
free_page (page);
return 1;
} else if (ptable_list[type].next != dp) {
@@ -491,10 +500,7 @@ void __init paging_init(void)
* initialize the bad page table and bad page to point
* to a couple of allocated pages
*/
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers
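With page tables accounted at all levels, get_pointer_table() now runs the constructor that matches the table type, so the unconditional pagetable_dtor() in free_pointer_table() can undo it regardless of level. A condensed, illustrative view of that pairing (example_ctor() is a hypothetical wrapper, not part of the patch):

        static void example_ctor(void *page, int type)
        {
                struct ptdesc *ptdesc = virt_to_ptdesc(page);

                if (type == TABLE_PTE)
                        pagetable_pte_ctor(ptdesc);     /* m68k has no SPLIT_PTE_PTLOCKS (no SMP) */
                else if (type == TABLE_PMD)
                        pagetable_pmd_ctor(ptdesc);
                else
                        pagetable_pgd_ctor(ptdesc);
                /* later freed with a single pagetable_dtor(), whatever the level */
        }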
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 494739c1783e..1ecf6bdd08bf 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -44,10 +44,7 @@ void __init paging_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
unsigned long size;
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
address = PAGE_OFFSET;
pg_dir = swapper_pg_dir;
@@ -57,10 +54,7 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
- if (!next_pgtable)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, PAGE_SIZE);
+ next_pgtable = (unsigned long)memblock_alloc_or_panic(size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 6ebf52740ad7..225fc735e466 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -252,12 +252,8 @@ void __init dvma_init(void)
list_add(&(hole->list), &hole_list);
- iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
+ iommu_use = memblock_alloc_or_panic(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
SMP_CACHE_BYTES);
- if (!iommu_use)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
-
dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
sun3_dvma_init();