Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--	mm/khugepaged.c	568
1 file changed, 298 insertions(+), 270 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 38830174608f..15203ea7d007 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -4,7 +4,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
-#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
@@ -20,6 +19,7 @@
#include <linux/rcupdate_wait.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
+#include <linux/dax.h>
#include <linux/ksm.h>
#include <asm/tlb.h>
@@ -85,7 +85,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
*
* Note that these are only respected if collapse was initiated by khugepaged.
*/
-static unsigned int khugepaged_max_ptes_none __read_mostly;
+unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
@@ -385,10 +385,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
int __init khugepaged_init(void)
{
- mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
- sizeof(struct khugepaged_mm_slot),
- __alignof__(struct khugepaged_mm_slot),
- 0, NULL);
+ mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
if (!mm_slot_cache)
return -ENOMEM;
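For reference, the KMEM_CACHE() helper adopted above derives the cache name, object size, and alignment from the struct itself. Its long-standing definition in include/linux/slab.h (abridged here; recent kernels route it through a kmem_cache_args variant) expands to exactly the open-coded call this hunk removes:

#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)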
@@ -416,6 +413,30 @@ static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
test_bit(MMF_DISABLE_THP, &mm->flags);
}
+static bool hugepage_pmd_enabled(void)
+{
+ /*
+ * We cover the anon, shmem and file-backed cases here; file-backed
+ * hugepages, when configured in, are determined by the global control.
+ * Anon pmd-sized hugepages are determined by the pmd-size control.
+ * Shmem pmd-sized hugepages are also determined by its pmd-size control,
+ * except when the global shmem_huge is set to SHMEM_HUGE_DENY.
+ */
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ hugepage_global_enabled())
+ return true;
+ if (test_bit(PMD_ORDER, &huge_anon_orders_always))
+ return true;
+ if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
+ return true;
+ if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
+ hugepage_global_enabled())
+ return true;
+ if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
+ return true;
+ return false;
+}
+
void __khugepaged_enter(struct mm_struct *mm)
{
struct khugepaged_mm_slot *mm_slot;
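As a sanity check on the decision table above, here is a minimal userspace model of hugepage_pmd_enabled(). All struct fields are hypothetical stand-ins for the kernel controls named in the comments, and a PMD_ORDER of 9 assumes 4K base pages with 2M PMDs as on x86-64:

#include <stdbool.h>
#include <stdio.h>

#define PMD_ORDER 9	/* 2M / 4K, as on x86-64 */

struct thp_model {
	unsigned long anon_orders_always;	/* huge_anon_orders_always */
	unsigned long anon_orders_madvise;	/* huge_anon_orders_madvise */
	unsigned long anon_orders_inherit;	/* huge_anon_orders_inherit */
	bool global_enabled;			/* hugepage_global_enabled() */
	bool shmem_pmd_enabled;			/* shmem_hpage_pmd_enabled() */
	bool ro_thp_for_fs;			/* CONFIG_READ_ONLY_THP_FOR_FS */
};

static bool model_pmd_enabled(const struct thp_model *m)
{
	if (m->ro_thp_for_fs && m->global_enabled)
		return true;
	if (m->anon_orders_always & (1UL << PMD_ORDER))
		return true;
	if (m->anon_orders_madvise & (1UL << PMD_ORDER))
		return true;
	if ((m->anon_orders_inherit & (1UL << PMD_ORDER)) && m->global_enabled)
		return true;
	return m->shmem_pmd_enabled;
}

int main(void)
{
	/* anon pmd control set to "inherit" while the global control is on */
	struct thp_model m = {
		.anon_orders_inherit = 1UL << PMD_ORDER,
		.global_enabled = true,
	};

	printf("khugepaged has work: %s\n",
	       model_pmd_enabled(&m) ? "yes" : "no");
	return 0;
}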
@@ -452,8 +473,8 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
- hugepage_flags_enabled()) {
- if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
+ hugepage_pmd_enabled()) {
+ if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
PMD_ORDER))
__khugepaged_enter(vma->vm_mm);
}
@@ -527,17 +548,6 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
}
}
-static bool is_refcount_suitable(struct folio *folio)
-{
- int expected_refcount;
-
- expected_refcount = folio_mapcount(folio);
- if (folio_test_swapcache(folio))
- expected_refcount += folio_nr_pages(folio);
-
- return folio_ref_count(folio) == expected_refcount;
-}
-
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
unsigned long address,
pte_t *pte,
@@ -583,7 +593,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
folio = page_folio(page);
VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
- if (page_mapcount(page) > 1) {
+ /* See hpage_collapse_scan_pmd(). */
+ if (folio_maybe_mapped_shared(folio)) {
++shared;
if (cc->is_khugepaged &&
shared > khugepaged_max_ptes_shared) {
@@ -607,8 +618,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
}
/*
- * We can do it before isolate_lru_page because the
- * page can't be freed from under us. NOTE: PG_lock
+ * We can do it before folio_isolate_lru because the
+ * folio can't be freed from under us. NOTE: PG_lock
* is needed to serialize against split_huge_page
* when invoked from the VM.
*/
@@ -628,7 +639,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
* but not from this process. The other process cannot write to
* the page, only trigger CoW.
*/
- if (!is_refcount_suitable(folio)) {
+ if (folio_expected_ref_count(folio) != folio_ref_count(folio)) {
folio_unlock(folio);
result = SCAN_PAGE_COUNT;
goto out;
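The removed is_refcount_suitable() above documents exactly what the new folio_expected_ref_count() comparison stands in for on this path. A kernel-style sketch (hypothetical helper name, not a standalone program) of the anonymous-folio accounting, mirroring the old helper:

/* Each PTE mapping contributes one reference, and the swap cache pins one
 * reference per page; anything beyond that is a GUP or other external pin,
 * so collapse requires folio_ref_count(folio) == anon_expected_refs(folio). */
static int anon_expected_refs(struct folio *folio)
{
	int refs = folio_mapcount(folio);

	if (folio_test_swapcache(folio))
		refs += folio_nr_pages(folio);
	return refs;
}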
@@ -672,13 +683,13 @@ next:
result = SCAN_LACK_REFERENCED_PAGE;
} else {
result = SCAN_SUCCEED;
- trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
+ trace_mm_collapse_huge_page_isolate(folio, none_or_zero,
referenced, writable, result);
return result;
}
out:
release_pte_pages(pte, _pte, compound_pagelist);
- trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
+ trace_mm_collapse_huge_page_isolate(folio, none_or_zero,
referenced, writable, result);
return result;
}
@@ -722,7 +733,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
ptep_clear(vma->vm_mm, address, _pte);
folio_remove_rmap_pte(src, src_page, vma);
spin_unlock(ptl);
- free_page_and_swap_cache(src_page);
+ free_folio_and_swap_cache(src);
}
}
@@ -767,7 +778,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
* Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
*
* @pte: starting of the PTEs to copy from
- * @page: the new hugepage to copy contents to
+ * @folio: the new hugepage to copy contents to
* @pmd: pointer to the new hugepage's PMD
* @orig_pmd: the original raw pages' PMD
* @vma: the original raw pages' virtual memory area
@@ -775,33 +786,29 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
* @ptl: lock on raw pages' PTEs
* @compound_pagelist: list that stores compound pages
*/
-static int __collapse_huge_page_copy(pte_t *pte,
- struct page *page,
- pmd_t *pmd,
- pmd_t orig_pmd,
- struct vm_area_struct *vma,
- unsigned long address,
- spinlock_t *ptl,
- struct list_head *compound_pagelist)
+static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
+ pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
+ unsigned long address, spinlock_t *ptl,
+ struct list_head *compound_pagelist)
{
- struct page *src_page;
- pte_t *_pte;
- pte_t pteval;
- unsigned long _address;
+ unsigned int i;
int result = SCAN_SUCCEED;
/*
* Copying pages' contents is subject to memory poison at any iteration.
*/
- for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
- _pte++, page++, _address += PAGE_SIZE) {
- pteval = ptep_get(_pte);
+ for (i = 0; i < HPAGE_PMD_NR; i++) {
+ pte_t pteval = ptep_get(pte + i);
+ struct page *page = folio_page(folio, i);
+ unsigned long src_addr = address + i * PAGE_SIZE;
+ struct page *src_page;
+
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
- clear_user_highpage(page, _address);
+ clear_user_highpage(page, src_addr);
continue;
}
src_page = pte_page(pteval);
- if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
+ if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
result = SCAN_COPY_MC;
break;
}
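A userspace model of the rewritten loop, with hypothetical types: a NULL source slot stands in for a none/zero PTE (destination subpage is cleared), and memcpy stands in for copy_mc_user_highpage(), whose nonzero return on a machine-check is what yields SCAN_COPY_MC:

#include <stddef.h>
#include <string.h>

#define HPAGE_PMD_NR	512
#define PAGE_SIZE	4096

static void collapse_copy_model(unsigned char (*dst)[PAGE_SIZE],
				unsigned char *src[HPAGE_PMD_NR])
{
	for (size_t i = 0; i < HPAGE_PMD_NR; i++) {
		if (!src[i])
			memset(dst[i], 0, PAGE_SIZE);	/* clear_user_highpage() */
		else
			memcpy(dst[i], src[i], PAGE_SIZE); /* copy; may hit MC */
	}
}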
@@ -891,20 +898,6 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
}
#endif
-static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
- nodemask_t *nmask)
-{
- *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
-
- if (unlikely(!*folio)) {
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
- return false;
- }
-
- count_vm_event(THP_COLLAPSE_ALLOC);
- return true;
-}
-
/*
* If mmap_lock temporarily dropped, revalidate vma
* before taking mmap_lock.
@@ -917,6 +910,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
struct collapse_control *cc)
{
struct vm_area_struct *vma;
+ unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
return SCAN_ANY_PROCESS;
@@ -927,8 +921,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
return SCAN_ADDRESS_RANGE;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- cc->is_khugepaged, PMD_ORDER))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
@@ -942,17 +935,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
return SCAN_SUCCEED;
}
-static int find_pmd_or_thp_or_none(struct mm_struct *mm,
- unsigned long address,
- pmd_t **pmd)
+static inline int check_pmd_state(pmd_t *pmd)
{
- pmd_t pmde;
+ pmd_t pmde = pmdp_get_lockless(pmd);
- *pmd = mm_find_pmd(mm, address);
- if (!*pmd)
- return SCAN_PMD_NULL;
-
- pmde = pmdp_get_lockless(*pmd);
if (pmd_none(pmde))
return SCAN_PMD_NONE;
if (!pmd_present(pmde))
@@ -966,6 +952,17 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
return SCAN_SUCCEED;
}
+static int find_pmd_or_thp_or_none(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t **pmd)
+{
+ *pmd = mm_find_pmd(mm, address);
+ if (!*pmd)
+ return SCAN_PMD_NULL;
+
+ return check_pmd_state(*pmd);
+}
+
static int check_pmd_still_valid(struct mm_struct *mm,
unsigned long address,
pmd_t *pmd)
@@ -1009,7 +1006,11 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
};
if (!pte++) {
- pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
+ /*
+ * Here the ptl is only used to check pte_same() in
+ * do_swap_page(), so the readonly version is enough.
+ */
+ pte = pte_offset_map_ro_nolock(mm, pmd, address, &ptl);
if (!pte) {
mmap_read_unlock(mm);
result = SCAN_PMD_NULL;
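The _ro_nolock/_rw_nolock split used here (and in collapse_pte_mapped_thp() below) encodes the caller's intent in the API. As used by this patch, the two prototypes differ only in that the rw variant also returns the pmd value sampled at map time, so writers can recheck it with pmd_same() once the lock is held:

pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, spinlock_t **ptlp);
pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, pmd_t *pmdvalp,
				spinlock_t **ptlp);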
@@ -1059,7 +1060,7 @@ out:
return result;
}
-static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
struct collapse_control *cc)
{
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
@@ -1067,20 +1068,23 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
int node = hpage_collapse_find_target_node(cc);
struct folio *folio;
- if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
- *hpage = NULL;
+ folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
+ if (!folio) {
+ *foliop = NULL;
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
return SCAN_ALLOC_HUGE_PAGE_FAIL;
}
+ count_vm_event(THP_COLLAPSE_ALLOC);
if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
folio_put(folio);
- *hpage = NULL;
+ *foliop = NULL;
return SCAN_CGROUP_CHARGE_FAIL;
}
count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
- *hpage = folio_page(folio, 0);
+ *foliop = folio;
return SCAN_SUCCEED;
}
@@ -1093,7 +1097,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pte_t *pte;
pgtable_t pgtable;
struct folio *folio;
- struct page *hpage;
spinlock_t *pmd_ptl, *pte_ptl;
int result = SCAN_FAIL;
struct vm_area_struct *vma;
@@ -1109,7 +1112,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
*/
mmap_read_unlock(mm);
- result = alloc_charge_hpage(&hpage, mm, cc);
+ result = alloc_charge_folio(&folio, mm, cc);
if (result != SCAN_SUCCEED)
goto out_nolock;
@@ -1169,7 +1172,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* huge and small TLB entries for the same virtual address to
* avoid the risk of CPU bugs in that area.
*
- * Parallel fast GUP is fine since fast GUP will back off when
+ * Parallel GUP-fast is fine since GUP-fast will back off when
* it detects PMD is changed.
*/
_pmd = pmdp_collapse_flush(vma, address, pmd);
@@ -1208,14 +1211,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
*/
anon_vma_unlock_write(vma->anon_vma);
- result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
+ result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
vma, address, pte_ptl,
&compound_pagelist);
pte_unmap(pte);
if (unlikely(result != SCAN_SUCCEED))
goto out_up_write;
- folio = page_folio(hpage);
/*
* The smp_wmb() inside __folio_mark_uptodate() ensures the
* copy_huge_page writes become visible before the set_pmd_at()
@@ -1224,26 +1226,27 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
__folio_mark_uptodate(folio);
pgtable = pmd_pgtable(_pmd);
- _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
+ _pmd = folio_mk_pmd(folio, vma->vm_page_prot);
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd));
- folio_add_new_anon_rmap(folio, vma, address);
+ folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
+ deferred_split_folio(folio, false);
spin_unlock(pmd_ptl);
- hpage = NULL;
+ folio = NULL;
result = SCAN_SUCCEED;
out_up_write:
mmap_write_unlock(mm);
out_nolock:
- if (hpage)
- put_page(hpage);
+ if (folio)
+ folio_put(folio);
trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
return result;
}
@@ -1334,8 +1337,18 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
result = SCAN_PAGE_NULL;
goto out_unmap;
}
+ folio = page_folio(page);
+
+ if (!folio_test_anon(folio)) {
+ result = SCAN_PAGE_ANON;
+ goto out_unmap;
+ }
- if (page_mapcount(page) > 1) {
+ /*
+ * We treat a single page as shared if any part of the THP
+ * is shared.
+ */
+ if (folio_maybe_mapped_shared(folio)) {
++shared;
if (cc->is_khugepaged &&
shared > khugepaged_max_ptes_shared) {
@@ -1345,7 +1358,6 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
}
}
- folio = page_folio(page);
/*
* Record which node the original page is from and save this
* information to cc->node_load[].
@@ -1366,22 +1378,18 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
result = SCAN_PAGE_LOCK;
goto out_unmap;
}
- if (!folio_test_anon(folio)) {
- result = SCAN_PAGE_ANON;
- goto out_unmap;
- }
/*
* Check if the page has any GUP (or other external) pins.
*
* Here the check may be racy:
- * it may see total_mapcount > refcount in some cases?
+ * it may see folio_mapcount() > folio_ref_count().
* But such a case is ephemeral; we could always retry collapse
* later. However it may report a false positive if the page
* has excessive GUP pins (i.e. 512). Anyway the same check
* will be done again later, so the risk seems low.
*/
- if (!is_refcount_suitable(folio)) {
+ if (folio_expected_ref_count(folio) != folio_ref_count(folio)) {
result = SCAN_PAGE_COUNT;
goto out_unmap;
}
@@ -1414,7 +1422,7 @@ out_unmap:
*mmap_locked = false;
}
out:
- trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
+ trace_mm_khugepaged_scan_pmd(mm, folio, writable, referenced,
none_or_zero, result, unmapped);
return result;
}
@@ -1443,10 +1451,9 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
}
}
-#ifdef CONFIG_SHMEM
-/* hpage must be locked, and mmap_lock must be held */
+/* folio must be locked, and mmap_lock must be held */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmdp, struct page *hpage)
+ pmd_t *pmdp, struct folio *folio, struct page *page)
{
struct vm_fault vmf = {
.vma = vma,
@@ -1455,13 +1462,12 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
.pmd = pmdp,
};
- VM_BUG_ON(!PageTransHuge(hpage));
mmap_assert_locked(vma->vm_mm);
- if (do_set_pmd(&vmf, hpage))
+ if (do_set_pmd(&vmf, folio, page))
return SCAN_FAIL;
- get_page(hpage);
+ folio_get(folio);
return SCAN_SUCCEED;
}
@@ -1510,8 +1516,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
* analogously elide sysfs THP settings here.
*/
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -1591,7 +1596,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
pml = pmd_lock(mm, pmd);
- start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
+ start_pte = pte_offset_map_rw_nolock(mm, pmd, haddr, &pgt_pmd, &ptl);
if (!start_pte) /* mmap_lock + page lock should prevent this */
goto abort;
if (!pml)
@@ -1599,6 +1604,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
else if (ptl != pml)
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+ if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd))))
+ goto abort;
+
/* step 2: clear page table and adjust rmap */
for (i = 0, addr = haddr, pte = start_pte;
i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
@@ -1631,7 +1639,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
nr_ptes++;
}
- pte_unmap(start_pte);
if (!pml)
spin_unlock(ptl);
@@ -1644,14 +1651,19 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
/* step 4: remove empty page table */
if (!pml) {
pml = pmd_lock(mm, pmd);
- if (ptl != pml)
+ if (ptl != pml) {
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+ if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd)))) {
+ flush_tlb_mm(mm);
+ goto unlock;
+ }
+ }
}
pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
pmdp_get_lockless_sync();
+ pte_unmap_unlock(start_pte, ptl);
if (ptl != pml)
- spin_unlock(ptl);
- spin_unlock(pml);
+ spin_unlock(pml);
mmu_notifier_invalidate_range_end(&range);
@@ -1662,7 +1674,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
maybe_install_pmd:
/* step 5: install pmd entry */
result = install_pmd
- ? set_huge_pmd(vma, haddr, pmd, &folio->page)
+ ? set_huge_pmd(vma, haddr, pmd, folio, &folio->page)
: SCAN_SUCCEED;
goto drop_folio;
abort:
@@ -1671,6 +1683,7 @@ abort:
folio_ref_sub(folio, nr_ptes);
add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
}
+unlock:
if (start_pte)
pte_unmap_unlock(start_pte, ptl);
if (pml && pml != ptl)
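Condensed, the revalidation flow that the two pmd_same() checks above add to collapse_pte_mapped_thp() looks like this (kernel-style sketch, error paths abbreviated); pgt_pmd is the pmd value sampled before any lock was taken:

start_pte = pte_offset_map_rw_nolock(mm, pmd, haddr, &pgt_pmd, &ptl);
/* ... take pml and/or ptl as required ... */
if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd))))
	goto abort;	/* the PTE table changed before we locked it */
/* ... step 2/3: clear PTEs and adjust rmap ... */
if (!pml) {
	pml = pmd_lock(mm, pmd);
	if (ptl != pml) {
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
		if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd)))) {
			/* PTEs were already cleared: flush before bailing */
			flush_tlb_mm(mm);
			goto unlock;
		}
	}
}
/* safe to collapse: pmdp_collapse_flush(vma, haddr, pmd) */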
@@ -1695,7 +1708,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
pmd_t *pmd, pgt_pmd;
spinlock_t *pml;
spinlock_t *ptl;
- bool skipped_uffd = false;
+ bool success = false;
/*
* Check vma->anon_vma to exclude MAP_PRIVATE mappings that
@@ -1732,6 +1745,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
mmu_notifier_invalidate_range_start(&range);
pml = pmd_lock(mm, pmd);
+ /*
+ * The lock of new_folio is still held, so we will be blocked in
+ * the page fault path, which prevents the pte entries from
+ * being set again. So even though the old empty PTE page may be
+ * concurrently freed and a new PTE page filled into the pmd
+ * entry, it is still empty and can be removed.
+ *
+ * So here we only need to recheck whether the state of the pmd
+ * entry still meets our requirements, rather than checking
+ * pmd_same() as elsewhere.
+ */
+ if (check_pmd_state(pmd) != SCAN_SUCCEED)
+ goto drop_pml;
ptl = pte_lockptr(mm, pmd);
if (ptl != pml)
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
@@ -1745,20 +1771,20 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* repeating the anon_vma check protects from one category,
* and repeating the userfaultfd_wp() check from another.
*/
- if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
- skipped_uffd = true;
- } else {
+ if (likely(!vma->anon_vma && !userfaultfd_wp(vma))) {
pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
pmdp_get_lockless_sync();
+ success = true;
}
if (ptl != pml)
spin_unlock(ptl);
+drop_pml:
spin_unlock(pml);
mmu_notifier_invalidate_range_end(&range);
- if (!skipped_uffd) {
+ if (success) {
mm_dec_nr_ptes(mm);
page_table_check_pte_clear_range(mm, addr, pgt_pmd);
pte_free_defer(mm, pmd_pgtable(pgt_pmd));
@@ -1797,29 +1823,28 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
struct collapse_control *cc)
{
struct address_space *mapping = file->f_mapping;
- struct page *hpage;
- struct page *page;
- struct page *tmp;
- struct folio *folio;
+ struct page *dst;
+ struct folio *folio, *tmp, *new_folio;
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
int nr_none = 0, result = SCAN_SUCCEED;
bool is_shmem = shmem_file(file);
- int nr = 0;
VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
- result = alloc_charge_hpage(&hpage, mm, cc);
+ result = alloc_charge_folio(&new_folio, mm, cc);
if (result != SCAN_SUCCEED)
goto out;
- __SetPageLocked(hpage);
+ mapping_set_update(&xas, mapping);
+
+ __folio_set_locked(new_folio);
if (is_shmem)
- __SetPageSwapBacked(hpage);
- hpage->index = start;
- hpage->mapping = mapping;
+ __folio_set_swapbacked(new_folio);
+ new_folio->index = start;
+ new_folio->mapping = mapping;
/*
* Ensure we have slots for all the pages in the range. This is
@@ -1837,13 +1862,13 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
} while (1);
- for (index = start; index < end; index++) {
+ for (index = start; index < end;) {
xas_set(&xas, index);
- page = xas_load(&xas);
+ folio = xas_load(&xas);
VM_BUG_ON(index != xas.xa_index);
if (is_shmem) {
- if (!page) {
+ if (!folio) {
/*
* Stop if extent has been truncated or
* hole-punched, and is now completely
@@ -1856,41 +1881,41 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
}
nr_none++;
+ index++;
continue;
}
- if (xa_is_value(page) || !PageUptodate(page)) {
+ if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
xas_unlock_irq(&xas);
/* swap in or instantiate fallocated page */
- if (shmem_get_folio(mapping->host, index,
+ if (shmem_get_folio(mapping->host, index, 0,
&folio, SGP_NOALLOC)) {
result = SCAN_FAIL;
goto xa_unlocked;
}
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
- page = folio_file_page(folio, index);
- } else if (trylock_page(page)) {
- get_page(page);
+ } else if (folio_trylock(folio)) {
+ folio_get(folio);
xas_unlock_irq(&xas);
} else {
result = SCAN_PAGE_LOCK;
goto xa_locked;
}
} else { /* !is_shmem */
- if (!page || xa_is_value(page)) {
+ if (!folio || xa_is_value(folio)) {
xas_unlock_irq(&xas);
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
end - index);
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
- page = find_lock_page(mapping, index);
- if (unlikely(page == NULL)) {
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR(folio)) {
result = SCAN_FAIL;
goto xa_unlocked;
}
- } else if (PageDirty(page)) {
+ } else if (folio_test_dirty(folio)) {
/*
* khugepaged only works on read-only fd,
* so this page is dirty because it hasn't
@@ -1908,12 +1933,12 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
filemap_flush(mapping);
result = SCAN_FAIL;
goto xa_unlocked;
- } else if (PageWriteback(page)) {
+ } else if (folio_test_writeback(folio)) {
xas_unlock_irq(&xas);
result = SCAN_FAIL;
goto xa_unlocked;
- } else if (trylock_page(page)) {
- get_page(page);
+ } else if (folio_trylock(folio)) {
+ folio_get(folio);
xas_unlock_irq(&xas);
} else {
result = SCAN_PAGE_LOCK;
@@ -1922,35 +1947,29 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
/*
- * The page must be locked, so we can drop the i_pages lock
+ * The folio must be locked, so we can drop the i_pages lock
* without racing with truncate.
*/
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- /* make sure the page is up to date */
- if (unlikely(!PageUptodate(page))) {
+ /* make sure the folio is up to date */
+ if (unlikely(!folio_test_uptodate(folio))) {
result = SCAN_FAIL;
goto out_unlock;
}
/*
* If file was truncated then extended, or hole-punched, before
- * we locked the first page, then a THP might be there already.
+ * we locked the first folio, then a THP might be there already.
* This will be discovered on the first iteration.
*/
- if (PageTransCompound(page)) {
- struct page *head = compound_head(page);
-
- result = compound_order(head) == HPAGE_PMD_ORDER &&
- head->index == start
- /* Maybe PMD-mapped */
- ? SCAN_PTE_MAPPED_HUGEPAGE
- : SCAN_PAGE_COMPOUND;
+ if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ folio->index == start) {
+ /* Maybe PMD-mapped */
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
goto out_unlock;
}
- folio = page_folio(page);
-
if (folio_mapping(folio) != mapping) {
result = SCAN_TRUNCATED;
goto out_unlock;
@@ -1960,7 +1979,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
folio_test_writeback(folio))) {
/*
* khugepaged only works on read-only fd, so this
- * page is dirty because it hasn't been flushed
+ * folio is dirty because it hasn't been flushed
* since first write.
*/
result = SCAN_FAIL;
@@ -1984,42 +2003,44 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
xas_lock_irq(&xas);
- VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
+ VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
/*
- * We control three references to the page:
+ * We control 2 + nr_pages references to the folio:
* - we hold a pin on it;
- * - one reference from page cache;
- * - one from isolate_lru_page;
- * If those are the only references, then any new usage of the
- * page will have to fetch it from the page cache. That requires
- * locking the page to handle truncate, so any new usage will be
- * blocked until we unlock page after collapse/during rollback.
+ * - nr_pages reference from page cache;
+ * - one from folio_isolate_lru();
+ * If those are the only references, then any new usage
+ * of the folio will have to fetch it from the page
+ * cache. That requires locking the folio to handle
+ * truncate, so any new usage will be blocked until we
+ * unlock the folio after collapse/during rollback.
*/
- if (page_count(page) != 3) {
+ if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) {
result = SCAN_PAGE_COUNT;
xas_unlock_irq(&xas);
- putback_lru_page(page);
+ folio_putback_lru(folio);
goto out_unlock;
}
/*
- * Accumulate the pages that are being collapsed.
+ * Accumulate the folios that are being collapsed.
*/
- list_add_tail(&page->lru, &pagelist);
+ list_add_tail(&folio->lru, &pagelist);
+ index += folio_nr_pages(folio);
continue;
out_unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
goto xa_unlocked;
}
if (!is_shmem) {
filemap_nr_thps_inc(mapping);
/*
- * Paired with smp_mb() in do_dentry_open() to ensure
- * i_writecount is up to date and the update to nr_thps is
- * visible. Ensures the page cache will be truncated if the
+ * Paired with the fence in do_dentry_open() -> get_write_access()
+ * to ensure i_writecount is up to date and the update to nr_thps
+ * is visible. Ensures the page cache will be truncated if the
* file is opened writable.
*/
smp_mb();
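To make the reference budget in the hunk above concrete, a worked count for a hypothetical order-4 (16-page) folio found fully in the page cache:

/* folio_nr_pages(folio) = 16 references held by the page cache slots
 *                      + 1 reference from our folio_get()/folio_try_get() pin
 *                      + 1 reference from folio_isolate_lru()
 *                      = 18 = 2 + folio_nr_pages(folio)
 * which is what the folio_ref_count() check demands, and what
 * folio_put_refs(folio, 2 + folio_nr_pages(folio)) drops on success. */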
@@ -2049,23 +2070,32 @@ xa_unlocked:
}
/*
- * The old pages are locked, so they won't change anymore.
+ * The old folios are locked, so they won't change anymore.
*/
index = start;
- list_for_each_entry(page, &pagelist, lru) {
- while (index < page->index) {
- clear_highpage(hpage + (index % HPAGE_PMD_NR));
+ dst = folio_page(new_folio, 0);
+ list_for_each_entry(folio, &pagelist, lru) {
+ int i, nr_pages = folio_nr_pages(folio);
+
+ while (index < folio->index) {
+ clear_highpage(dst);
index++;
+ dst++;
}
- if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
- result = SCAN_COPY_MC;
- goto rollback;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) {
+ result = SCAN_COPY_MC;
+ goto rollback;
+ }
+ index++;
+ dst++;
}
- index++;
}
while (index < end) {
- clear_highpage(hpage + (index % HPAGE_PMD_NR));
+ clear_highpage(dst);
index++;
+ dst++;
}
if (nr_none) {
@@ -2093,16 +2123,17 @@ xa_unlocked:
}
/*
- * If userspace observed a missing page in a VMA with a MODE_MISSING
- * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
- * page. If so, we need to roll back to avoid suppressing such an
- * event. Since wp/minor userfaultfds don't give userspace any
- * guarantees that the kernel doesn't fill a missing page with a zero
- * page, so they don't matter here.
+ * If userspace observed a missing page in a VMA with
+ * a MODE_MISSING userfaultfd, then it might expect a
+ * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
+ * roll back to avoid suppressing such an event. Since
+ * wp/minor userfaultfds don't give userspace any
+ * guarantees that the kernel doesn't fill a missing
+ * page with a zero page, they don't matter here.
*
- * Any userfaultfds registered after this point will not be able to
- * observe any missing pages due to the previously inserted retry
- * entries.
+ * Any userfaultfds registered after this point will
+ * not be able to observe any missing pages due to the
+ * previously inserted retry entries.
*/
vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
if (userfaultfd_missing(vma)) {
@@ -2127,33 +2158,32 @@ immap_locked:
xas_lock_irq(&xas);
}
- folio = page_folio(hpage);
- nr = folio_nr_pages(folio);
if (is_shmem)
- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
+ __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
else
- __lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);
+ __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
if (nr_none) {
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
+ __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
/* nr_none is always 0 for non-shmem. */
- __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
+ __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
}
/*
- * Mark hpage as uptodate before inserting it into the page cache so
- * that it isn't mistaken for an fallocated but unwritten page.
+ * Mark new_folio as uptodate before inserting it into the
+ * page cache so that it isn't mistaken for a fallocated but
+ * unwritten page.
*/
- folio_mark_uptodate(folio);
- folio_ref_add(folio, HPAGE_PMD_NR - 1);
+ folio_mark_uptodate(new_folio);
+ folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
if (is_shmem)
- folio_mark_dirty(folio);
- folio_add_lru(folio);
+ folio_mark_dirty(new_folio);
+ folio_add_lru(new_folio);
/* Join all the small entries into a single multi-index entry. */
xas_set_order(&xas, start, HPAGE_PMD_ORDER);
- xas_store(&xas, folio);
+ xas_store(&xas, new_folio);
WARN_ON_ONCE(xas_error(&xas));
xas_unlock_irq(&xas);
@@ -2164,18 +2194,18 @@ immap_locked:
retract_page_tables(mapping, start);
if (cc && !cc->is_khugepaged)
result = SCAN_PTE_MAPPED_HUGEPAGE;
- folio_unlock(folio);
+ folio_unlock(new_folio);
/*
- * The collapse has succeeded, so free the old pages.
+ * The collapse has succeeded, so free the old folios.
*/
- list_for_each_entry_safe(page, tmp, &pagelist, lru) {
- list_del(&page->lru);
- page->mapping = NULL;
- ClearPageActive(page);
- ClearPageUnevictable(page);
- unlock_page(page);
- folio_put_refs(page_folio(page), 3);
+ list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
+ list_del(&folio->lru);
+ folio->mapping = NULL;
+ folio_clear_active(folio);
+ folio_clear_unevictable(folio);
+ folio_unlock(folio);
+ folio_put_refs(folio, 2 + folio_nr_pages(folio));
}
goto out;
@@ -2189,11 +2219,11 @@ rollback:
shmem_uncharge(mapping->host, nr_none);
}
- list_for_each_entry_safe(page, tmp, &pagelist, lru) {
- list_del(&page->lru);
- unlock_page(page);
- putback_lru_page(page);
- put_page(page);
+ list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
+ list_del(&folio->lru);
+ folio_unlock(folio);
+ folio_putback_lru(folio);
+ folio_put(folio);
}
/*
* Undo the updates of filemap_nr_thps_inc for non-SHMEM
@@ -2203,19 +2233,19 @@ rollback:
if (!is_shmem && result == SCAN_COPY_MC) {
filemap_nr_thps_dec(mapping);
/*
- * Paired with smp_mb() in do_dentry_open() to
- * ensure the update to nr_thps is visible.
+ * Paired with the fence in do_dentry_open() -> get_write_access()
+ * to ensure the update to nr_thps is visible.
*/
smp_mb();
}
- hpage->mapping = NULL;
+ new_folio->mapping = NULL;
- unlock_page(hpage);
- put_page(hpage);
+ folio_unlock(new_folio);
+ folio_put(new_folio);
out:
VM_BUG_ON(!list_empty(&pagelist));
- trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
+ trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
return result;
}
@@ -2223,7 +2253,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
struct file *file, pgoff_t start,
struct collapse_control *cc)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct address_space *mapping = file->f_mapping;
XA_STATE(xas, &mapping->i_pages, start);
int present, swap;
@@ -2235,12 +2265,12 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
rcu_read_lock();
- xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
+ if (xas_retry(&xas, folio))
continue;
- if (xa_is_value(page)) {
- ++swap;
+ if (xa_is_value(folio)) {
+ swap += 1 << xas_get_order(&xas);
if (cc->is_khugepaged &&
swap > khugepaged_max_ptes_swap) {
result = SCAN_EXCEED_SWAP_PTE;
@@ -2250,52 +2280,60 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
continue;
}
- /*
- * TODO: khugepaged should compact smaller compound pages
- * into a PMD sized page
- */
- if (PageTransCompound(page)) {
- struct page *head = compound_head(page);
-
- result = compound_order(head) == HPAGE_PMD_ORDER &&
- head->index == start
- /* Maybe PMD-mapped */
- ? SCAN_PTE_MAPPED_HUGEPAGE
- : SCAN_PAGE_COMPOUND;
+ if (!folio_try_get(folio)) {
+ xas_reset(&xas);
+ continue;
+ }
+
+ if (unlikely(folio != xas_reload(&xas))) {
+ folio_put(folio);
+ xas_reset(&xas);
+ continue;
+ }
+
+ if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ folio->index == start) {
+ /* Maybe PMD-mapped */
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
/*
* For SCAN_PTE_MAPPED_HUGEPAGE, further processing
* by the caller won't touch the page cache, and so
* it's safe to skip LRU and refcount checks before
* returning.
*/
+ folio_put(folio);
break;
}
- node = page_to_nid(page);
+ node = folio_nid(folio);
if (hpage_collapse_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
+ folio_put(folio);
break;
}
cc->node_load[node]++;
- if (!PageLRU(page)) {
+ if (!folio_test_lru(folio)) {
result = SCAN_PAGE_LRU;
+ folio_put(folio);
break;
}
- if (page_count(page) !=
- 1 + page_mapcount(page) + page_has_private(page)) {
+ if (folio_expected_ref_count(folio) + 1 != folio_ref_count(folio)) {
result = SCAN_PAGE_COUNT;
+ folio_put(folio);
break;
}
/*
- * We probably should check if the page is referenced here, but
- * nobody would transfer pte_young() to PageReferenced() for us.
- * And rmap walk here is just too costly...
+ * We probably should check if the folio is referenced
+ * here, but nobody would transfer pte_young() to
+ * folio_test_referenced() for us. And rmap walk here
+ * is just too costly...
*/
- present++;
+ present += folio_nr_pages(folio);
+ folio_put(folio);
if (need_resched()) {
xas_pause(&xas);
@@ -2314,17 +2352,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
}
- trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
+ trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
return result;
}
-#else
-static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
- struct file *file, pgoff_t start,
- struct collapse_control *cc)
-{
- BUILD_BUG();
-}
-#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
struct collapse_control *cc)
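The folio_try_get()/xas_reload() pair adopted in hpage_collapse_scan_file() above is the page cache's standard speculative-lookup idiom. A kernel-style sketch (not standalone; value entries are assumed to have been filtered out first, as in the hunk above, and last_index is hypothetical):

xas_for_each(&xas, folio, last_index) {
	if (!folio_try_get(folio)) {
		/* refcount already hit zero: the folio is being freed */
		xas_reset(&xas);
		continue;
	}
	if (unlikely(folio != xas_reload(&xas))) {
		/* the slot was reused between load and pin: retry it */
		folio_put(folio);
		xas_reset(&xas);
		continue;
	}
	/* pinned and verified: safe to inspect, then drop the pin */
	folio_put(folio);
}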
@@ -2376,8 +2406,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- true, PMD_ORDER)) {
+ if (!thp_vma_allowable_order(vma, vma->vm_flags,
+ TVA_ENFORCE_SYSFS, PMD_ORDER)) {
skip:
progress++;
continue;
@@ -2400,7 +2430,7 @@ skip:
VM_BUG_ON(khugepaged_scan.address < hstart ||
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
- if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+ if (!vma_is_anonymous(vma)) {
struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma,
khugepaged_scan.address);
@@ -2479,8 +2509,7 @@ breakouterloop_mmap_lock:
static int khugepaged_has_work(void)
{
- return !list_empty(&khugepaged_scan.mm_head) &&
- hugepage_flags_enabled();
+ return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
}
static int khugepaged_wait_event(void)
@@ -2553,7 +2582,7 @@ static void khugepaged_wait_work(void)
return;
}
- if (hugepage_flags_enabled())
+ if (hugepage_pmd_enabled())
wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
@@ -2584,7 +2613,7 @@ static void set_recommended_min_free_kbytes(void)
int nr_zones = 0;
unsigned long recommended_min;
- if (!hugepage_flags_enabled()) {
+ if (!hugepage_pmd_enabled()) {
calculate_min_free_kbytes();
goto update_wmarks;
}
@@ -2634,7 +2663,7 @@ int start_stop_khugepaged(void)
int err = 0;
mutex_lock(&khugepaged_mutex);
- if (hugepage_flags_enabled()) {
+ if (hugepage_pmd_enabled()) {
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
"khugepaged");
@@ -2660,7 +2689,7 @@ fail:
void khugepaged_min_free_kbytes_update(void)
{
mutex_lock(&khugepaged_mutex);
- if (hugepage_flags_enabled() && khugepaged_thread)
+ if (hugepage_pmd_enabled() && khugepaged_thread)
set_recommended_min_free_kbytes();
mutex_unlock(&khugepaged_mutex);
}
@@ -2714,8 +2743,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
*prev = vma;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL);
@@ -2748,7 +2776,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
mmap_assert_locked(mm);
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
- if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+ if (!vma_is_anonymous(vma)) {
struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma, addr);