-rw-r--r--  fs/hugetlbfs/inode.c  |  2
-rw-r--r--  include/linux/mm.h    |  4
-rw-r--r--  mm/huge_memory.c      |  4
-rw-r--r--  mm/hugetlb.c          |  3
-rw-r--r--  mm/memory.c           | 34
5 files changed, 21 insertions(+), 26 deletions(-)
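
At a glance: the three-argument clear_huge_page() becomes folio_zero_user(), and the subpage count that every caller previously had to supply is now derived from the folio itself. A minimal sketch of the two prototypes, condensed from the include/linux/mm.h hunk below:

    /* Before: callers pass the subpage count explicitly. */
    void clear_huge_page(struct page *page, unsigned long addr_hint,
                         unsigned int pages_per_huge_page);

    /* After: the count is implied by the folio, via folio_nr_pages(). */
    void folio_zero_user(struct folio *folio, unsigned long addr_hint);
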
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6df794ed4066..9456e1d55540 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -892,7 +892,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
error = PTR_ERR(folio);
goto out;
}
- clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
+ folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));
__folio_mark_uptodate(folio);
error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
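
The fallocate path has no faulting user address, so the hint passed here is the hugepage-aligned base of the range being allocated. ALIGN_DOWN() on a power-of-two size is plain masking; a worked example with an assumed 2 MiB huge page:

    unsigned long hint = ALIGN_DOWN(addr, hpage_size);
    /* hpage_size = 0x200000: ALIGN_DOWN(0x00201000, 0x200000) == 0x00200000,
     * i.e. addr & ~(hpage_size - 1)
     */
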
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 101945baffc7..e2140ea6ae98 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4067,9 +4067,7 @@ enum mf_action_page_type {
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-extern void clear_huge_page(struct page *page,
- unsigned long addr_hint,
- unsigned int pages_per_huge_page);
+void folio_zero_user(struct folio *folio, unsigned long addr_hint);
int copy_user_large_folio(struct folio *dst, struct folio *src,
unsigned long addr_hint,
struct vm_area_struct *vma);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14a05c643806..be598e9a5f98 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -944,10 +944,10 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
goto release;
}
- clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+ folio_zero_user(folio, vmf->address);
/*
* The memory barrier inside __folio_mark_uptodate makes sure that
- * clear_huge_page writes become visible before the set_pmd_at()
+ * folio_zero_user writes become visible before the set_pmd_at()
* write.
*/
__folio_mark_uptodate(folio);
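
The comment's ordering argument is unchanged by the rename. Spelled out as the sequence the barrier protects (a sketch of this call site, not new code):

    folio_zero_user(folio, vmf->address);  /* 1: zero the folio */
    __folio_mark_uptodate(folio);          /* 2: smp_wmb(), then set uptodate */
    /* 3: set_pmd_at() later publishes the mapping; the barrier keeps another
     * CPU from observing the PMD before the zeroed contents.
     */
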
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6a5ea898e4da..a47f8c6c37c2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6300,8 +6300,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
ret = 0;
goto out;
}
- clear_huge_page(&folio->page, vmf->real_address,
- pages_per_huge_page(h));
+ folio_zero_user(folio, vmf->real_address);
__folio_mark_uptodate(folio);
new_folio = true;
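
The fault paths pass the actual faulting address (vmf->real_address here, vmf->address in the THP path) so the callee can clear the faulting subpage last and leave it cache-hot. How the hint is consumed, condensed from process_huge_page() in mm/memory.c:

    /* Derive the folio's base user address, then the target subpage index. */
    unsigned long addr = addr_hint &
            ~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
    int target = (addr_hint - addr) / PAGE_SIZE;  /* processed last */
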
diff --git a/mm/memory.c b/mm/memory.c
index 97ddba866e43..cb26f8713db4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4488,7 +4488,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
goto next;
}
folio_throttle_swaprate(folio, gfp);
- clear_huge_page(&folio->page, vmf->address, 1 << order);
+ folio_zero_user(folio, vmf->address);
return folio;
}
next:
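
In alloc_anon_folio() the explicit 1 << order argument disappears with no loss of information; for an order-N folio the two quantities are identical:

    unsigned int nr = folio_nr_pages(folio);  /* == 1 << folio_order(folio) */
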
@@ -6441,41 +6441,39 @@ static inline int process_huge_page(
return 0;
}
-static void clear_gigantic_page(struct page *page,
- unsigned long addr,
+static void clear_gigantic_page(struct folio *folio, unsigned long addr,
unsigned int pages_per_huge_page)
{
int i;
- struct page *p;
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
- p = nth_page(page, i);
cond_resched();
- clear_user_highpage(p, addr + i * PAGE_SIZE);
+ clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
}
}
static int clear_subpage(unsigned long addr, int idx, void *arg)
{
- struct page *page = arg;
+ struct folio *folio = arg;
- clear_user_highpage(nth_page(page, idx), addr);
+ clear_user_highpage(folio_page(folio, idx), addr);
return 0;
}
-void clear_huge_page(struct page *page,
- unsigned long addr_hint, unsigned int pages_per_huge_page)
+/**
+ * folio_zero_user - Zero a folio which will be mapped to userspace.
+ * @folio: The folio to zero.
+ * @addr_hint: The address that will be accessed, or the base address if unclear.
+ */
+void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
- unsigned long addr = addr_hint &
- ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+ unsigned int nr_pages = folio_nr_pages(folio);
- if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
- clear_gigantic_page(page, addr, pages_per_huge_page);
- return;
- }
-
- process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+ if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+ clear_gigantic_page(folio, addr_hint, nr_pages);
+ else
+ process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
}
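
The new function keeps clear_huge_page()'s two-tier behaviour: folios up to MAX_ORDER_NR_PAGES go through the cache-locality-aware process_huge_page() path, while larger (gigantic) folios take the simple sequential loop with cond_resched(). A hypothetical caller, with names invented for illustration and not part of this patch:

    /* Sketch only: zero a freshly allocated folio before mapping it at the
     * faulting address; 'my_fill_folio' is an assumed helper name.
     */
    static void my_fill_folio(struct folio *folio, unsigned long fault_addr)
    {
            folio_zero_user(folio, fault_addr);  /* hint steers clearing order */
            __folio_mark_uptodate(folio);        /* zeroes visible before mapping */
    }
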
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,