author     Linus Torvalds <torvalds@linux-foundation.org>   2023-08-29 14:25:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-08-29 14:25:26 -0700
commit     b96a3e9142fdf346b05b20e867b4f0dfca119e96 (patch)
tree       b338a8f8930abc24888fc3871c6627f6ad46e23b /arch/arm
parent     651a00bc56403161351090a9d7ddbd7095975324 (diff)
parent     52ae298e3e5c9be5bb95e1c6d9199e5210f2a156 (diff)
Merge tag 'mm-stable-2023-08-28-18-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:

 - Some swap cleanups from Ma Wupeng ("fix WARN_ON in add_to_avail_list")
 - Peter Xu has a series ("mm/gup: Unify hugetlb, speed up thp") which reduces the special-case code for handling hugetlb pages in GUP. It also speeds up GUP handling of transparent hugepages.
 - Peng Zhang provides some maple tree speedups ("Optimize the fast path of mas_store()").
 - Sergey Senozhatsky has improved the performance of zsmalloc during compaction ("zsmalloc: small compaction improvements").
 - Domenico Cerasuolo has developed additional selftest code for zswap ("selftests: cgroup: add zswap test program").
 - xu xin has done some work on KSM's handling of zero pages. These changes are mainly to enable the user to better understand the effectiveness of KSM's treatment of zero pages ("ksm: support tracking KSM-placed zero-pages").
 - Jeff Xu has fixed the behaviour of memfd's MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED sysctl ("mm/memfd: fix sysctl MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED").
 - David Howells has fixed an fscache optimization ("mm, netfs, fscache: Stop read optimisation when folio removed from pagecache").
 - Axel Rasmussen has given userfaultfd the ability to simulate memory poisoning ("add UFFDIO_POISON to simulate memory poisoning with UFFD").
 - Miaohe Lin has contributed some routine maintenance work on the memory-failure code ("mm: memory-failure: remove unneeded PageHuge() check").
 - Peng Zhang has contributed some maintenance work on the maple tree code ("Improve the validation for maple tree and some cleanup").
 - Hugh Dickins has optimized the collapsing of shmem or file pages into THPs ("mm: free retracted page table by RCU").
 - Jiaqi Yan has a patch series which permits us to use the healthy subpages within a hardware poisoned huge page for general purposes ("Improve hugetlbfs read on HWPOISON hugepages").
 - Kemeng Shi has done some maintenance work on the pagetable-check code ("Remove unused parameters in page_table_check").
 - More folioification work from Matthew Wilcox ("More filesystem folio conversions for 6.6"), ("Followup folio conversions for zswap"), and from ZhangPeng ("Convert several functions in page_io.c to use a folio").
 - page_ext cleanups from Kemeng Shi ("minor cleanups for page_ext").
 - Baoquan He has converted some architectures to use the GENERIC_IOREMAP ioremap()/iounmap() code ("mm: ioremap: Convert architectures to take GENERIC_IOREMAP way").
 - Anshuman Khandual has optimized arm64 tlb shootdown ("arm64: support batched/deferred tlb shootdown during page reclamation/migration").
 - Better maple tree lockdep checking from Liam Howlett ("More strict maple tree lockdep"). Liam also developed some efficiency improvements ("Reduce preallocations for maple tree").
 - Cleanup and optimization to the secondary IOMMU TLB invalidation, from Alistair Popple ("Invalidate secondary IOMMU TLB on permission upgrade").
 - Ryan Roberts fixes some arm64 MM selftest issues ("selftests/mm fixes for arm64").
 - Kemeng Shi provides some maintenance work on the compaction code ("Two minor cleanups for compaction").
 - Some reduction in mmap_lock pressure from Matthew Wilcox ("Handle most file-backed faults under the VMA lock").
 - Aneesh Kumar contributes code to use the vmemmap optimization for DAX on ppc64, under some circumstances ("Add support for DAX vmemmap optimization for ppc64").
 - page-ext cleanups from Kemeng Shi ("add page_ext_data to get client data in page_ext"), ("minor cleanups to page_ext header").
 - Some zswap cleanups from Johannes Weiner ("mm: zswap: three cleanups").
 - kmsan cleanups from ZhangPeng ("minor cleanups for kmsan").
 - VMA handling cleanups from Kefeng Wang ("mm: convert to vma_is_initial_heap/stack()").
 - DAMON feature work from SeongJae Park ("mm/damon/sysfs-schemes: implement DAMOS tried total bytes file"), ("Extend DAMOS filters for address ranges and DAMON monitoring targets").
 - Compaction work from Kemeng Shi ("Fixes and cleanups to compaction").
 - Liam Howlett has improved the maple tree node replacement code ("maple_tree: Change replacement strategy").
 - ZhangPeng has a general code cleanup - use the K() macro more widely ("cleanup with helper macro K()").
 - Aneesh Kumar brings memmap-on-memory to ppc64 ("Add support for memmap on memory feature on ppc64").
 - pagealloc cleanups from Kemeng Shi ("Two minor cleanups for pcp list in page_alloc"), ("Two minor cleanups for get pageblock migratetype").
 - Vishal Moola introduces a memory descriptor for page table tracking, "struct ptdesc" ("Split ptdesc from struct page").
 - memfd selftest maintenance work from Aleksa Sarai ("memfd: cleanups for vm.memfd_noexec").
 - MM include file rationalization from Hugh Dickins ("arch: include asm/cacheflush.h in asm/hugetlb.h").
 - THP debug output fixes from Hugh Dickins ("mm,thp: fix sloppy text output").
 - kmemleak improvements from Xiaolei Wang ("mm/kmemleak: use object_cache instead of kmemleak_initialized").
 - More folio-related cleanups from Matthew Wilcox ("Remove _folio_dtor and _folio_order").
 - A VMA locking scalability improvement from Suren Baghdasaryan ("Per-VMA lock support for swap and userfaults").
 - pagetable handling cleanups from Matthew Wilcox ("New page table range API"); the arch/arm side of this is illustrated in the sketch after this list.
 - A batch of swap/thp cleanups from David Hildenbrand ("mm/swap: stop using page->private on tail pages for THP_SWAP + cleanups").
 - Cleanups and speedups to the hugetlb fault handling from Matthew Wilcox ("Change calling convention for ->huge_fault").
 - Matthew Wilcox has also done some maintenance work on the MM subsystem documentation ("Improve mm documentation").

* tag 'mm-stable-2023-08-28-18-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (489 commits)
  maple_tree: shrink struct maple_tree
  maple_tree: clean up mas_wr_append()
  secretmem: convert page_is_secretmem() to folio_is_secretmem()
  nios2: fix flush_dcache_page() for usage from irq context
  hugetlb: add documentation for vma_kernel_pagesize()
  mm: add orphaned kernel-doc to the rst files.
  mm: fix clean_record_shared_mapping_range kernel-doc
  mm: fix get_mctgt_type() kernel-doc
  mm: fix kernel-doc warning from tlb_flush_rmaps()
  mm: remove enum page_entry_size
  mm: allow ->huge_fault() to be called without the mmap_lock held
  mm: move PMD_ORDER to pgtable.h
  mm: remove checks for pte_index
  memcg: remove duplication detection for mem_cgroup_uncharge_swap
  mm/huge_memory: work on folio->swap instead of page->private when splitting folio
  mm/swap: inline folio_set_swap_entry() and folio_swap_entry()
  mm/swap: use dedicated entry for swap in folio
  mm/swap: stop using page->private on tail pages for THP_SWAP
  selftests/mm: fix WARNING comparing pointer to 0
  selftests: cgroup: fix test_kmem_memcg_deletion kernel mem check
  ...
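The arch/arm changes pulled in below are largely the architecture's side of the "New page table range API" and "Split ptdesc from struct page" series: set_pte_at(), flush_cache_page(), update_mmu_cache() and flush_dcache_page() gain batched, folio-aware counterparts (set_ptes(), flush_cache_pages(), update_mmu_cache_range(), flush_dcache_folio()), with the single-page forms kept as nr == 1 wrappers. A minimal sketch of how a caller might drive the batched entry points; map_folio_range() is a hypothetical helper for illustration only, and locking, fault bookkeeping and error handling are omitted:

/*
 * Illustrative sketch only -- not part of this merge.  Install PTEs for
 * every page of a folio with one set_ptes() call, then do the deferred
 * cache maintenance for the whole range.
 */
static void map_folio_range(struct vm_area_struct *vma, unsigned long addr,
			    struct folio *folio, pte_t *ptep, pgprot_t prot)
{
	unsigned int nr = folio_nr_pages(folio);
	pte_t pte = mk_pte(&folio->page, prot);

	/* Was: nr separate set_pte_at() calls, one per page. */
	set_ptes(vma->vm_mm, addr, ptep, pte, nr);

	/* Was: update_mmu_cache() per page; a NULL vmf is allowed here. */
	update_mmu_cache_range(NULL, vma, addr, ptep, nr);
}

The cacheflush.h hunk below keeps flush_cache_page() as flush_cache_pages(vma, addr, pfn, 1), and update_mmu_cache() likewise becomes a one-page wrapper around update_mmu_cache_range().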
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 29
-rw-r--r--  arch/arm/include/asm/hugetlb.h    |  1
-rw-r--r--  arch/arm/include/asm/pgtable.h    |  5
-rw-r--r--  arch/arm/include/asm/tlb.h        | 12
-rw-r--r--  arch/arm/include/asm/tlbflush.h   | 14
-rw-r--r--  arch/arm/mm/copypage-v4mc.c       |  5
-rw-r--r--  arch/arm/mm/copypage-v6.c         |  5
-rw-r--r--  arch/arm/mm/copypage-xscale.c     |  5
-rw-r--r--  arch/arm/mm/dma-mapping.c         | 28
-rw-r--r--  arch/arm/mm/fault-armv.c          | 19
-rw-r--r--  arch/arm/mm/flush.c               | 99
-rw-r--r--  arch/arm/mm/mm.h                  |  2
-rw-r--r--  arch/arm/mm/mmu.c                 | 21
-rw-r--r--  arch/arm/mm/nommu.c               |  6
-rw-r--r--  arch/arm/mm/pageattr.c            |  6
15 files changed, 151 insertions(+), 106 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index a094f964c869..f6181f69577f 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -231,14 +231,15 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
vma->vm_flags);
}
-static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
struct mm_struct *mm = vma->vm_mm;
if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
- __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+ __cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
+ vma->vm_flags);
}
}
@@ -247,15 +248,17 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
vivt_flush_cache_range(vma,start,end)
-#define flush_cache_page(vma,addr,pfn) \
- vivt_flush_cache_page(vma,addr,pfn)
+#define flush_cache_pages(vma, addr, pfn, nr) \
+ vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn, unsigned int nr);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
/*
* flush_icache_user_range is used when we want to ensure that the
@@ -289,7 +292,9 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* See update_mmu_cache for the user space part.
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *);
+void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
@@ -317,12 +322,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
-
-/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
* caches, since the direct-mappings of these pages may contain cached
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index d02d6ca88e92..a3a82b7158d4 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -10,6 +10,7 @@
#ifndef _ASM_ARM_HUGETLB_H
#define _ASM_ARM_HUGETLB_H
+#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/hugetlb-3level.h>
#include <asm-generic/hugetlb.h>
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 34662a9d4cab..ba573f22d7cc 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -207,8 +207,9 @@ static inline void __sync_icache_dcache(pte_t pteval)
extern void __sync_icache_dcache(pte_t pteval);
#endif
-void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval);
+void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr);
+#define set_ptes set_ptes
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index b8cbe03ad260..f40d06ad5d2a 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -39,7 +39,9 @@ static inline void __tlb_remove_table(void *_table)
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
- pgtable_pte_page_dtor(pte);
+ struct ptdesc *ptdesc = page_ptdesc(pte);
+
+ pagetable_pte_dtor(ptdesc);
#ifndef CONFIG_ARM_LPAE
/*
@@ -50,17 +52,17 @@ __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif
- tlb_remove_table(tlb, pte);
+ tlb_remove_ptdesc(tlb, ptdesc);
}
static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
- struct page *page = virt_to_page(pmdp);
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
- pgtable_pmd_page_dtor(page);
- tlb_remove_table(tlb, page);
+ pagetable_pmd_dtor(ptdesc);
+ tlb_remove_ptdesc(tlb, ptdesc);
#endif
}
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 0ccc985b90af..38c6e4a2a0b6 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -619,18 +619,22 @@ extern void flush_bp_all(void);
* If PG_dcache_clean is not set for the page, we need to ensure that any
* cache entries for the kernels virtual memory range are written
* back to the page. On ARMv6 and later, the cache coherency is handled via
- * the set_pte_at() function.
+ * the set_ptes() function.
*/
#if __LINUX_ARM_ARCH__ < 6
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
#else
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ unsigned int nr)
{
}
#endif
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#endif
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index f1da3b439b96..7ddd82b9fe8b 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -64,10 +64,11 @@ static void mc_copy_user_page(void *from, void *to)
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &from->flags))
- __flush_dcache_page(page_mapping_file(from), from);
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ __flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index d8a115de5507..a1a71f36d850 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -69,11 +69,12 @@ static void discard_old_kernel_data(void *kto)
static void v6_copy_user_highpage_aliasing(struct page *to,
struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
- if (!test_and_set_bit(PG_dcache_clean, &from->flags))
- __flush_dcache_page(page_mapping_file(from), from);
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ __flush_dcache_folio(folio_flush_mapping(src), src);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(to));
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bcb485620a05..f1e29d3e8193 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -84,10 +84,11 @@ static void mc_copy_user_page(void *from, void *to)
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &from->flags))
- __flush_dcache_page(page_mapping_file(from), from);
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ __flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 033a1bce2b17..5409225b4abc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -709,19 +709,21 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
* Mark the D-cache clean for these pages to avoid extra flushing.
*/
if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
- unsigned long pfn;
- size_t left = size;
-
- pfn = page_to_pfn(page) + off / PAGE_SIZE;
- off %= PAGE_SIZE;
- if (off) {
- pfn++;
- left -= PAGE_SIZE - off;
- }
- while (left >= PAGE_SIZE) {
- page = pfn_to_page(pfn++);
- set_bit(PG_dcache_clean, &page->flags);
- left -= PAGE_SIZE;
+ struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
+ size_t offset = offset_in_folio(folio, paddr);
+
+ for (;;) {
+ size_t sz = folio_size(folio) - offset;
+
+ if (size < sz)
+ break;
+ if (!offset)
+ set_bit(PG_dcache_clean, &folio->flags);
+ offset = 0;
+ size -= sz;
+ if (!size)
+ break;
+ folio = folio_next(folio);
}
}
}
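The rewritten loop in __dma_page_dev_to_cpu() above walks the synced region folio by folio and sets PG_dcache_clean only on folios the transfer covered completely; a head folio entered part-way in (offset != 0) or a tail folio that the region ends inside (size < sz) is left unmarked. A commented restatement of that walk under a hypothetical function name, purely to spell out the invariant (paddr is the physical start and size the length of the synced region):

/* Illustrative restatement of the loop above -- not part of the patch. */
static void mark_fully_synced_folios_clean(phys_addr_t paddr, size_t size)
{
	struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
	size_t offset = offset_in_folio(folio, paddr);

	for (;;) {
		/* Bytes of this folio that fall inside the synced region. */
		size_t sz = folio_size(folio) - offset;

		if (size < sz)		/* region ends inside this folio: leave it dirty */
			break;
		if (!offset)		/* folio covered from its first byte: mark it clean */
			set_bit(PG_dcache_clean, &folio->flags);
		offset = 0;		/* every later folio starts at offset 0 */
		size -= sz;
		if (!size)
			break;
		folio = folio_next(folio);
	}
}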
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ca5302b0b7ee..2286c2ea60ec 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -117,11 +117,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
* must use the nested version. This also means we need to
* open-code the spin-locking.
*/
- pte = pte_offset_map(pmd, address);
+ pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);
if (!pte)
return 0;
- ptl = pte_lockptr(vma->vm_mm, pmd);
do_pte_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
@@ -181,12 +180,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
*
* Note that the pte lock will be held.
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
{
unsigned long pfn = pte_pfn(*ptep);
struct address_space *mapping;
- struct page *page;
+ struct folio *folio;
if (!pfn_valid(pfn))
return;
@@ -195,13 +194,13 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
* The zero page is never written to, so never has any dirty
* cache lines, and therefore never needs to be flushed.
*/
- page = pfn_to_page(pfn);
- if (page == ZERO_PAGE(0))
+ if (is_zero_pfn(pfn))
return;
- mapping = page_mapping_file(page);
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- __flush_dcache_page(mapping, page);
+ folio = page_folio(pfn_to_page(pfn));
+ mapping = folio_flush_mapping(folio);
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ __flush_dcache_folio(mapping, folio);
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, ptep, pfn);
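In adjust_pte() above, the pte_offset_map() + pte_lockptr() pair is replaced by pte_offset_map_nolock(), which maps the PTE and reports which spinlock guards it without actually taking that lock, so the caller can keep its open-coded (here: nested) locking. A hedged sketch of the general calling pattern; the function name is hypothetical and plain spin_lock() stands in for the nested-lock wrappers the real code uses:

/* Illustrative sketch only -- not part of this patch. */
static int adjust_one_pte_example(struct mm_struct *mm, pmd_t *pmd,
				  unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	/* Map the PTE and learn its lock, but do not take the lock yet. */
	pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
	if (!pte)
		return 0;

	spin_lock(ptl);
	/* ... examine or modify *pte here ... */
	spin_unlock(ptl);
	pte_unmap(pte);
	return 1;
}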
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 2508be91b7a0..d19d140a10c7 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -95,10 +95,10 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
__flush_icache_all();
}
-void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
if (cache_is_vivt()) {
- vivt_flush_cache_page(vma, user_addr, pfn);
+ vivt_flush_cache_pages(vma, user_addr, pfn, nr);
return;
}
@@ -196,29 +196,31 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
#endif
}
-void __flush_dcache_page(struct address_space *mapping, struct page *page)
+void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
* coherent with the kernels mapping.
*/
- if (!PageHighMem(page)) {
- __cpuc_flush_dcache_area(page_address(page), page_size(page));
+ if (!folio_test_highmem(folio)) {
+ __cpuc_flush_dcache_area(folio_address(folio),
+ folio_size(folio));
} else {
unsigned long i;
if (cache_is_vipt_nonaliasing()) {
- for (i = 0; i < compound_nr(page); i++) {
- void *addr = kmap_atomic(page + i);
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ void *addr = kmap_local_folio(folio,
+ i * PAGE_SIZE);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
} else {
- for (i = 0; i < compound_nr(page); i++) {
- void *addr = kmap_high_get(page + i);
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ void *addr = kmap_high_get(folio_page(folio, i));
if (addr) {
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_high(page + i);
+ kunmap_high(folio_page(folio, i));
}
}
}
@@ -230,15 +232,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
* userspace colour, which is congruent with page->index.
*/
if (mapping && cache_is_vipt_aliasing())
- flush_pfn_alias(page_to_pfn(page),
- page->index << PAGE_SHIFT);
+ flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}
-static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
+static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *mpnt;
- pgoff_t pgoff;
+ struct vm_area_struct *vma;
+ pgoff_t pgoff, pgoff_end;
/*
* There are possible user space mappings of this page:
@@ -246,21 +247,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
* data in the current VM view associated with this page.
* - aliasing VIPT: we only need to find one mapping of this page.
*/
- pgoff = page->index;
+ pgoff = folio->index;
+ pgoff_end = pgoff + folio_nr_pages(folio) - 1;
flush_dcache_mmap_lock(mapping);
- vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
- unsigned long offset;
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
+ unsigned long start, offset, pfn;
+ unsigned int nr;
/*
* If this VMA is not in our MM, we can ignore it.
*/
- if (mpnt->vm_mm != mm)
+ if (vma->vm_mm != mm)
continue;
- if (!(mpnt->vm_flags & VM_MAYSHARE))
+ if (!(vma->vm_flags & VM_MAYSHARE))
continue;
- offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
- flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
+
+ start = vma->vm_start;
+ pfn = folio_pfn(folio);
+ nr = folio_nr_pages(folio);
+ offset = pgoff - vma->vm_pgoff;
+ if (offset > -nr) {
+ pfn -= offset;
+ nr += offset;
+ } else {
+ start += offset * PAGE_SIZE;
+ }
+ if (start + nr * PAGE_SIZE > vma->vm_end)
+ nr = (vma->vm_end - start) / PAGE_SIZE;
+
+ flush_cache_pages(vma, start, pfn, nr);
}
flush_dcache_mmap_unlock(mapping);
}
@@ -269,7 +285,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
void __sync_icache_dcache(pte_t pteval)
{
unsigned long pfn;
- struct page *page;
+ struct folio *folio;
struct address_space *mapping;
if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
@@ -279,14 +295,14 @@ void __sync_icache_dcache(pte_t pteval)
if (!pfn_valid(pfn))
return;
- page = pfn_to_page(pfn);
+ folio = page_folio(pfn_to_page(pfn));
if (cache_is_vipt_aliasing())
- mapping = page_mapping_file(page);
+ mapping = folio_flush_mapping(folio);
else
mapping = NULL;
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- __flush_dcache_page(mapping, page);
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ __flush_dcache_folio(mapping, folio);
if (pte_exec(pteval))
__flush_icache_all();
@@ -312,7 +328,7 @@ void __sync_icache_dcache(pte_t pteval)
* Note that we disable the lazy flush for SMP configurations where
* the cache maintenance operations are not automatically broadcasted.
*/
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;
@@ -320,31 +336,36 @@ void flush_dcache_page(struct page *page)
* The zero page is never written to, so never has any dirty
* cache lines, and therefore never needs to be flushed.
*/
- if (page == ZERO_PAGE(0))
+ if (is_zero_pfn(folio_pfn(folio)))
return;
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
- if (test_bit(PG_dcache_clean, &page->flags))
- clear_bit(PG_dcache_clean, &page->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags))
+ clear_bit(PG_dcache_clean, &folio->flags);
return;
}
- mapping = page_mapping_file(page);
+ mapping = folio_flush_mapping(folio);
if (!cache_ops_need_broadcast() &&
- mapping && !page_mapcount(page))
- clear_bit(PG_dcache_clean, &page->flags);
+ mapping && !folio_mapped(folio))
+ clear_bit(PG_dcache_clean, &folio->flags);
else {
- __flush_dcache_page(mapping, page);
+ __flush_dcache_folio(mapping, folio);
if (mapping && cache_is_vivt())
- __flush_dcache_aliases(mapping, page);
+ __flush_dcache_aliases(mapping, folio);
else if (mapping)
__flush_icache_all();
- set_bit(PG_dcache_clean, &page->flags);
+ set_bit(PG_dcache_clean, &folio->flags);
}
}
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
+void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
+EXPORT_SYMBOL(flush_dcache_page);
/*
* Flush an anonymous page so that users of get_user_pages()
* can safely access the data. The expected sequence is:
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d7ffccb7fea7..419316316711 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -45,7 +45,7 @@ struct mem_type {
const struct mem_type *get_mem_type(unsigned int type);
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+void __flush_dcache_folio(struct address_space *mapping, struct folio *folio);
/*
* ARM specific vm_struct->flags bits.
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 13fc4bb5f792..674ed71573a8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -737,11 +737,12 @@ static void __init *early_alloc(unsigned long sz)
static void *__init late_alloc(unsigned long sz)
{
- void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
+ void *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_HIGHMEM,
+ get_order(sz));
- if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
+ if (!ptdesc || !pagetable_pte_ctor(ptdesc))
BUG();
- return ptr;
+ return ptdesc_to_virt(ptdesc);
}
static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
@@ -1788,7 +1789,7 @@ void __init paging_init(const struct machine_desc *mdesc)
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
- __flush_dcache_page(NULL, empty_zero_page);
+ __flush_dcache_folio(NULL, page_folio(empty_zero_page));
}
void __init early_mm_init(const struct machine_desc *mdesc)
@@ -1797,8 +1798,8 @@ void __init early_mm_init(const struct machine_desc *mdesc)
early_paging_init(mdesc);
}
-void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
+void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr)
{
unsigned long ext = 0;
@@ -1808,5 +1809,11 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr,
ext |= PTE_EXT_NG;
}
- set_pte_ext(ptep, pteval, ext);
+ for (;;) {
+ set_pte_ext(ptep, pteval, ext);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte_val(pteval) += PAGE_SIZE;
+ }
}
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 43cfd06bbeba..c415f3859b20 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -180,6 +180,12 @@ void setup_mm_for_reboot(void)
{
}
+void flush_dcache_folio(struct folio *folio)
+{
+ __cpuc_flush_dcache_area(folio_address(folio), folio_size(folio));
+}
+EXPORT_SYMBOL(flush_dcache_folio);
+
void flush_dcache_page(struct page *page)
{
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index c3c34fe714b0..064ad508c149 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -25,7 +25,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
return 0;
}
-static bool in_range(unsigned long start, unsigned long size,
+static bool range_in_range(unsigned long start, unsigned long size,
unsigned long range_start, unsigned long range_end)
{
return start >= range_start && start < range_end &&
@@ -63,8 +63,8 @@ static int change_memory_common(unsigned long addr, int numpages,
if (!size)
return 0;
- if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
- !in_range(start, size, VMALLOC_START, VMALLOC_END))
+ if (!range_in_range(start, size, MODULES_VADDR, MODULES_END) &&
+ !range_in_range(start, size, VMALLOC_START, VMALLOC_END))
return -EINVAL;
return __change_memory_common(start, size, set_mask, clear_mask);