author     Matthew Wilcox (Oracle) <willy@infradead.org>    2021-11-28 14:53:35 -0500
committer  Matthew Wilcox (Oracle) <willy@infradead.org>    2022-01-08 00:28:32 -0500
commit     3506659e18a61ae525f3b9b4f5af23b4b149d4db (patch)
tree       a6edd50d2dcb142c0e1d2a0c3f5fb8afcaf0d91a /mm
parent     efe99bba2862aef24f1b05b786f6bf5acb076209 (diff)
mm: Add unmap_mapping_folio()
Convert both callers of unmap_mapping_page() to call unmap_mapping_folio()
instead. Also move zap_details from linux/mm.h to mm/memory.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
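For illustration only (not part of the patch), the caller-side change looks roughly
like the sketch below. The helper names are hypothetical; code like this would sit
under mm/ and include "internal.h", where unmap_mapping_folio() is declared.

    /* Hypothetical truncate-style helper; the folio is locked by the caller. */
    static void example_unmap_locked_folio(struct folio *folio)
    {
            /*
             * Old API took the (head) page, even for a multi-page THP:
             *      if (page_mapped(&folio->page))
             *              unmap_mapping_page(&folio->page);
             * New API takes the locked folio itself:
             */
            if (folio_mapped(folio))
                    unmap_mapping_folio(folio);
    }

    /* A caller that still has only a struct page wraps it first. */
    static void example_unmap_locked_page(struct page *page)
    {
            if (page_mapped(page))
                    unmap_mapping_folio(page_folio(page));
    }

Because mm/internal.h now only forward-declares struct zap_details, its definition
(with a single_folio member replacing single_page) can move out of linux/mm.h and
stay private to mm/memory.c.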
Diffstat (limited to 'mm')
-rw-r--r--   mm/internal.h    4
-rw-r--r--   mm/memory.c     49
-rw-r--r--   mm/truncate.c    4
3 files changed, 40 insertions, 17 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 3b79a5c9427a..1ca93c6cb18c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -74,6 +74,7 @@ static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}
+struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
@@ -388,6 +389,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
#ifdef CONFIG_MMU
+void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
@@ -491,8 +493,8 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
}
return fpin;
}
-
#else /* !CONFIG_MMU */
+static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
diff --git a/mm/memory.c b/mm/memory.c
index 8f1de811a1dc..23f2f1300d42 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1304,6 +1304,28 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
return ret;
}
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+ struct address_space *zap_mapping; /* Check page->mapping if set */
+ struct folio *single_folio; /* Locked folio to be unmapped */
+};
+
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if we should skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+ if (!details || !page)
+ return false;
+
+ return details->zap_mapping &&
+ (details->zap_mapping != page_rmapping(page));
+}
+
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
@@ -1443,8 +1465,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
- } else if (details && details->single_page &&
- PageTransCompound(details->single_page) &&
+ } else if (details && details->single_folio &&
+ folio_test_pmd_mappable(details->single_folio) &&
next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
/*
@@ -3332,31 +3354,30 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
}
/**
- * unmap_mapping_page() - Unmap single page from processes.
- * @page: The locked page to be unmapped.
+ * unmap_mapping_folio() - Unmap single folio from processes.
+ * @folio: The locked folio to be unmapped.
*
- * Unmap this page from any userspace process which still has it mmaped.
+ * Unmap this folio from any userspace process which still has it mmaped.
* Typically, for efficiency, the range of nearby pages has already been
* unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
- * truncation or invalidation holds the lock on a page, it may find that
- * the page has been remapped again: and then uses unmap_mapping_page()
+ * truncation or invalidation holds the lock on a folio, it may find that
+ * the page has been remapped again: and then uses unmap_mapping_folio()
* to unmap it finally.
*/
-void unmap_mapping_page(struct page *page)
+void unmap_mapping_folio(struct folio *folio)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct zap_details details = { };
pgoff_t first_index;
pgoff_t last_index;
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(PageTail(page));
+ VM_BUG_ON(!folio_test_locked(folio));
- first_index = page->index;
- last_index = page->index + thp_nr_pages(page) - 1;
+ first_index = folio->index;
+ last_index = folio->index + folio_nr_pages(folio) - 1;
details.zap_mapping = mapping;
- details.single_page = page;
+ details.single_folio = folio;
i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
diff --git a/mm/truncate.c b/mm/truncate.c
index ab86b07c1e9c..c98feea75a10 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -180,7 +180,7 @@ void do_invalidatepage(struct page *page, unsigned int offset,
static void truncate_cleanup_folio(struct folio *folio)
{
if (folio_mapped(folio))
- unmap_mapping_page(&folio->page);
+ unmap_mapping_folio(folio);
if (folio_has_private(folio))
do_invalidatepage(&folio->page, 0, folio_size(folio));
@@ -670,7 +670,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
wait_on_page_writeback(page);
if (page_mapped(page))
- unmap_mapping_page(page);
+ unmap_mapping_folio(page_folio(page));
BUG_ON(page_mapped(page));
ret2 = do_launder_page(mapping, page);