Diffstat (limited to 'mm/migrate.c')
-rw-r--r--   mm/migrate.c   | 2272
1 file changed, 1386 insertions(+), 886 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c index a4d3fc65085f..5169f9717f60 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -16,12 +16,10 @@ #include <linux/migrate.h> #include <linux/export.h> #include <linux/swap.h> -#include <linux/swapops.h> +#include <linux/leafops.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/mm_inline.h> -#include <linux/nsproxy.h> -#include <linux/pagevec.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/topology.h> @@ -36,30 +34,89 @@ #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/hugetlb.h> -#include <linux/hugetlb_cgroup.h> #include <linux/gfp.h> -#include <linux/pfn_t.h> -#include <linux/memremap.h> -#include <linux/userfaultfd_k.h> -#include <linux/balloon_compaction.h> #include <linux/page_idle.h> #include <linux/page_owner.h> #include <linux/sched/mm.h> #include <linux/ptrace.h> -#include <linux/oom.h> #include <linux/memory.h> -#include <linux/random.h> #include <linux/sched/sysctl.h> #include <linux/memory-tiers.h> +#include <linux/pagewalk.h> #include <asm/tlbflush.h> #include <trace/events/migrate.h> #include "internal.h" +#include "swap.h" -int isolate_movable_page(struct page *page, isolate_mode_t mode) +static const struct movable_operations *offline_movable_ops; +static const struct movable_operations *zsmalloc_movable_ops; + +int set_movable_ops(const struct movable_operations *ops, enum pagetype type) +{ + /* + * We only allow for selected types and don't handle concurrent + * registration attempts yet. + */ + switch (type) { + case PGTY_offline: + if (offline_movable_ops && ops) + return -EBUSY; + offline_movable_ops = ops; + break; + case PGTY_zsmalloc: + if (zsmalloc_movable_ops && ops) + return -EBUSY; + zsmalloc_movable_ops = ops; + break; + default: + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(set_movable_ops); + +static const struct movable_operations *page_movable_ops(struct page *page) { + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page); + + /* + * If we enable page migration for a page of a certain type by marking + * it as movable, the page type must be sticky until the page gets freed + * back to the buddy. + */ + if (PageOffline(page)) + /* Only balloon compaction sets PageOffline pages movable. */ + return offline_movable_ops; + if (PageZsmalloc(page)) + return zsmalloc_movable_ops; + + return NULL; +} + +/** + * isolate_movable_ops_page - isolate a movable_ops page for migration + * @page: The page. + * @mode: The isolation mode. + * + * Try to isolate a movable_ops page for migration. Will fail if the page is + * not a movable_ops page, if the page is already isolated for migration + * or if the page was just was released by its owner. + * + * Once isolated, the page cannot get freed until it is either putback + * or migrated. + * + * Returns true if isolation succeeded, otherwise false. + */ +bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode) +{ + /* + * TODO: these pages will not be folios in the future. All + * folio dependencies will have to be removed. + */ + struct folio *folio = folio_get_nontail_page(page); const struct movable_operations *mops; /* @@ -71,24 +128,19 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode) * the put_page() at the end of this block will take care of * release this page, thus avoiding a nasty leakage. */ - if (unlikely(!get_page_unless_zero(page))) + if (!folio) goto out; - if (unlikely(PageSlab(page))) - goto out_putpage; - /* Pairs with smp_wmb() in slab freeing, e.g. 
SLUB's __free_slab() */ - smp_rmb(); /* - * Check movable flag before taking the page lock because + * Check for movable_ops pages before taking the page lock because * we use non-atomic bitops on newly allocated page flags so * unconditionally grabbing the lock ruins page's owner side. + * + * Note that once a page has movable_ops, it will stay that way + * until the page was freed. */ - if (unlikely(!__PageMovable(page))) - goto out_putpage; - /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */ - smp_rmb(); - if (unlikely(PageSlab(page))) - goto out_putpage; + if (unlikely(!page_has_movable_ops(page))) + goto out_putfolio; /* * As movable pages are not isolated from LRU lists, concurrent @@ -101,39 +153,97 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode) * lets be sure we have the page lock * before proceeding with the movable page isolation steps. */ - if (unlikely(!trylock_page(page))) - goto out_putpage; + if (unlikely(!folio_trylock(folio))) + goto out_putfolio; - if (!PageMovable(page) || PageIsolated(page)) + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page); + if (PageMovableOpsIsolated(page)) goto out_no_isolated; mops = page_movable_ops(page); - VM_BUG_ON_PAGE(!mops, page); + if (WARN_ON_ONCE(!mops)) + goto out_no_isolated; if (!mops->isolate_page(page, mode)) goto out_no_isolated; - /* Driver shouldn't use PG_isolated bit of page->flags */ - WARN_ON_ONCE(PageIsolated(page)); - SetPageIsolated(page); - unlock_page(page); + /* Driver shouldn't use the isolated flag */ + VM_WARN_ON_ONCE_PAGE(PageMovableOpsIsolated(page), page); + SetPageMovableOpsIsolated(page); + folio_unlock(folio); - return 0; + return true; out_no_isolated: - unlock_page(page); -out_putpage: - put_page(page); + folio_unlock(folio); +out_putfolio: + folio_put(folio); out: - return -EBUSY; + return false; } -static void putback_movable_page(struct page *page) +/** + * putback_movable_ops_page - putback an isolated movable_ops page + * @page: The isolated page. + * + * Putback an isolated movable_ops page. + * + * After the page was putback, it might get freed instantly. + */ +static void putback_movable_ops_page(struct page *page) { - const struct movable_operations *mops = page_movable_ops(page); + /* + * TODO: these pages will not be folios in the future. All + * folio dependencies will have to be removed. + */ + struct folio *folio = page_folio(page); - mops->putback_page(page); - ClearPageIsolated(page); + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page); + VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(page), page); + folio_lock(folio); + page_movable_ops(page)->putback_page(page); + ClearPageMovableOpsIsolated(page); + folio_unlock(folio); + folio_put(folio); +} + +/** + * migrate_movable_ops_page - migrate an isolated movable_ops page + * @dst: The destination page. + * @src: The source page. + * @mode: The migration mode. + * + * Migrate an isolated movable_ops page. + * + * If the src page was already released by its owner, the src page is + * un-isolated (putback) and migration succeeds; the migration core will be the + * owner of both pages. + * + * If the src page was not released by its owner and the migration was + * successful, the owner of the src page and the dst page are swapped and + * the src page is un-isolated. + * + * If migration fails, the ownership stays unmodified and the src page + * remains isolated: migration may be retried later or the page can be putback. 
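/*
 * Editor's illustrative sketch, not part of this patch: how a driver could
 * plug into the movable_ops registration introduced above. The callback
 * bodies and all "my_balloon" names are hypothetical; set_movable_ops(),
 * struct movable_operations and PGTY_offline are taken from the patch.
 */
static bool my_balloon_isolate(struct page *page, isolate_mode_t mode)
{
        /* Detach the page from the driver's internal lists so it may move. */
        return true;
}

static int my_balloon_migrate(struct page *dst, struct page *src,
                              enum migrate_mode mode)
{
        /* Transfer driver state from src to dst; 0 means success. */
        return 0;
}

static void my_balloon_putback(struct page *page)
{
        /* Re-attach an isolated page after a failed or aborted migration. */
}

static const struct movable_operations my_balloon_mops = {
        .isolate_page   = my_balloon_isolate,
        .migrate_page   = my_balloon_migrate,
        .putback_page   = my_balloon_putback,
};

/* Registration is a single call; passing NULL ops later unregisters. */
static int __init my_balloon_register(void)
{
        return set_movable_ops(&my_balloon_mops, PGTY_offline);
}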
+ * + * TODO: migration core will treat both pages as folios and lock them before + * this call to unlock them after this call. Further, the folio refcounts on + * src and dst are also released by migration core. These pages will not be + * folios in the future, so that must be reworked. + * + * Returns 0 on success, otherwise a negative error code. + */ +static int migrate_movable_ops_page(struct page *dst, struct page *src, + enum migrate_mode mode) +{ + int rc; + + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src); + VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src); + rc = page_movable_ops(src)->migrate_page(dst, src, mode); + if (!rc) + ClearPageMovableOpsIsolated(src); + return rc; } /* @@ -142,53 +252,108 @@ static void putback_movable_page(struct page *page) * * This function shall be used whenever the isolated pageset has been * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() - * and isolate_hugetlb(). + * and folio_isolate_hugetlb(). */ void putback_movable_pages(struct list_head *l) { - struct page *page; - struct page *page2; + struct folio *folio; + struct folio *folio2; - list_for_each_entry_safe(page, page2, l, lru) { - if (unlikely(PageHuge(page))) { - putback_active_hugepage(page); + list_for_each_entry_safe(folio, folio2, l, lru) { + if (unlikely(folio_test_hugetlb(folio))) { + folio_putback_hugetlb(folio); continue; } - list_del(&page->lru); - /* - * We isolated non-lru movable page so here we can use - * __PageMovable because LRU page's mapping cannot have - * PAGE_MAPPING_MOVABLE. - */ - if (unlikely(__PageMovable(page))) { - VM_BUG_ON_PAGE(!PageIsolated(page), page); - lock_page(page); - if (PageMovable(page)) - putback_movable_page(page); - else - ClearPageIsolated(page); - unlock_page(page); - put_page(page); + list_del(&folio->lru); + if (unlikely(page_has_movable_ops(&folio->page))) { + putback_movable_ops_page(&folio->page); } else { - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -thp_nr_pages(page)); - putback_lru_page(page); + node_stat_mod_folio(folio, NR_ISOLATED_ANON + + folio_is_file_lru(folio), -folio_nr_pages(folio)); + folio_putback_lru(folio); } } } +/* Must be called with an elevated refcount on the non-hugetlb folio */ +bool isolate_folio_to_list(struct folio *folio, struct list_head *list) +{ + if (folio_test_hugetlb(folio)) + return folio_isolate_hugetlb(folio, list); + + if (page_has_movable_ops(&folio->page)) { + if (!isolate_movable_ops_page(&folio->page, + ISOLATE_UNEVICTABLE)) + return false; + } else { + if (!folio_isolate_lru(folio)) + return false; + node_stat_add_folio(folio, NR_ISOLATED_ANON + + folio_is_file_lru(folio)); + } + list_add(&folio->lru, list); + return true; +} + +static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, + struct folio *folio, pte_t old_pte, unsigned long idx) +{ + struct page *page = folio_page(folio, idx); + pte_t newpte; + + if (PageCompound(page) || PageHWPoison(page)) + return false; + + VM_BUG_ON_PAGE(!PageAnon(page), page); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(pte_present(old_pte), page); + VM_WARN_ON_ONCE_FOLIO(folio_is_device_private(folio), folio); + + if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || + mm_forbids_zeropage(pvmw->vma->vm_mm)) + return false; + + /* + * The pmd entry mapping the old thp was flushed and the pte mapping + * this subpage has been non present. If the subpage is only zero-filled + * then map it to the shared zeropage. 
+ */ + if (!pages_identical(page, ZERO_PAGE(0))) + return false; + + newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address), + pvmw->vma->vm_page_prot)); + + if (pte_swp_soft_dirty(old_pte)) + newpte = pte_mksoft_dirty(newpte); + if (pte_swp_uffd_wp(old_pte)) + newpte = pte_mkuffd_wp(newpte); + + set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte); + + dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio)); + return true; +} + +struct rmap_walk_arg { + struct folio *folio; + bool map_unused_to_zeropage; +}; + /* * Restore a potential migration pte to a working pte entry */ static bool remove_migration_pte(struct folio *folio, - struct vm_area_struct *vma, unsigned long addr, void *old) + struct vm_area_struct *vma, unsigned long addr, void *arg) { - DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); + struct rmap_walk_arg *rmap_walk_arg = arg; + DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION); while (page_vma_mapped_walk(&pvmw)) { rmap_t rmap_flags = RMAP_NONE; + pte_t old_pte; pte_t pte; - swp_entry_t entry; + softleaf_t entry; struct page *new; unsigned long idx = 0; @@ -206,26 +371,30 @@ static bool remove_migration_pte(struct folio *folio, continue; } #endif + old_pte = ptep_get(pvmw.pte); + if (rmap_walk_arg->map_unused_to_zeropage && + try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx)) + continue; folio_get(folio); pte = mk_pte(new, READ_ONCE(vma->vm_page_prot)); - if (pte_swp_soft_dirty(*pvmw.pte)) - pte = pte_mksoft_dirty(pte); - /* - * Recheck VMA as permissions can change since migration started - */ - entry = pte_to_swp_entry(*pvmw.pte); - if (!is_migration_entry_young(entry)) + entry = softleaf_from_pte(old_pte); + if (!softleaf_is_migration_young(entry)) pte = pte_mkold(pte); - if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) + if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry)) pte = pte_mkdirty(pte); - if (is_writable_migration_entry(entry)) - pte = maybe_mkwrite(pte, vma); - else if (pte_swp_uffd_wp(*pvmw.pte)) + if (pte_swp_soft_dirty(old_pte)) + pte = pte_mksoft_dirty(pte); + else + pte = pte_clear_soft_dirty(pte); + + if (softleaf_is_migration_write(entry)) + pte = pte_mkwrite(pte, vma); + else if (pte_swp_uffd_wp(old_pte)) pte = pte_mkuffd_wp(pte); - if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) + if (folio_test_anon(folio) && !softleaf_is_migration_read(entry)) rmap_flags |= RMAP_EXCLUSIVE; if (unlikely(is_device_private_page(new))) { @@ -235,37 +404,39 @@ static bool remove_migration_pte(struct folio *folio, else entry = make_readable_device_private_entry( page_to_pfn(new)); - pte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(*pvmw.pte)) + pte = softleaf_to_pte(entry); + if (pte_swp_soft_dirty(old_pte)) pte = pte_swp_mksoft_dirty(pte); - if (pte_swp_uffd_wp(*pvmw.pte)) + if (pte_swp_uffd_wp(old_pte)) pte = pte_swp_mkuffd_wp(pte); } #ifdef CONFIG_HUGETLB_PAGE if (folio_test_hugetlb(folio)) { - unsigned int shift = huge_page_shift(hstate_vma(vma)); + struct hstate *h = hstate_vma(vma); + unsigned int shift = huge_page_shift(h); + unsigned long psize = huge_page_size(h); - pte = pte_mkhuge(pte); pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (folio_test_anon(folio)) - hugepage_add_anon_rmap(new, vma, pvmw.address, - rmap_flags); + hugetlb_add_anon_rmap(folio, vma, pvmw.address, + rmap_flags); else - page_dup_file_rmap(new, true); - set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); + hugetlb_add_file_rmap(folio); + 
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte, + psize); } else #endif { if (folio_test_anon(folio)) - page_add_anon_rmap(new, vma, pvmw.address, - rmap_flags); + folio_add_anon_rmap_pte(folio, new, vma, + pvmw.address, rmap_flags); else - page_add_file_rmap(new, vma, false); + folio_add_file_rmap_pte(folio, new, vma); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } - if (vma->vm_flags & VM_LOCKED) - mlock_page_drain_local(); + if (READ_ONCE(vma->vm_flags) & VM_LOCKED) + mlock_drain_local(); trace_remove_migration_pte(pvmw.address, pte_val(pte), compound_order(new)); @@ -281,14 +452,21 @@ static bool remove_migration_pte(struct folio *folio, * Get rid of all migration entries and replace them by * references to the indicated page. */ -void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) +void remove_migration_ptes(struct folio *src, struct folio *dst, int flags) { + struct rmap_walk_arg rmap_walk_arg = { + .folio = src, + .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE, + }; + struct rmap_walk_control rwc = { .rmap_one = remove_migration_pte, - .arg = src, + .arg = &rmap_walk_arg, }; - if (locked) + VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src); + + if (flags & RMP_LOCKED) rmap_walk_locked(dst, &rwc); else rmap_walk(dst, &rwc); @@ -299,54 +477,70 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) * get to the page and wait until migration is finished. * When we return from this function the fault will be retried. */ -void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl) +void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address) { + spinlock_t *ptl; + pte_t *ptep; pte_t pte; - swp_entry_t entry; + softleaf_t entry; - spin_lock(ptl); - pte = *ptep; - if (!is_swap_pte(pte)) + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!ptep) + return; + + pte = ptep_get(ptep); + pte_unmap(ptep); + + if (pte_none(pte) || pte_present(pte)) goto out; - entry = pte_to_swp_entry(pte); - if (!is_migration_entry(entry)) + entry = softleaf_from_pte(pte); + if (!softleaf_is_migration(entry)) goto out; - migration_entry_wait_on_locked(entry, ptep, ptl); + migration_entry_wait_on_locked(entry, ptl); return; out: - pte_unmap_unlock(ptep, ptl); -} - -void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, - unsigned long address) -{ - spinlock_t *ptl = pte_lockptr(mm, pmd); - pte_t *ptep = pte_offset_map(pmd, address); - __migration_entry_wait(mm, ptep, ptl); + spin_unlock(ptl); } #ifdef CONFIG_HUGETLB_PAGE -void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) +/* + * The vma read lock must be held upon entry. Holding that lock prevents either + * the pte or the ptl from being freed. + * + * This function will release the vma lock before returning. 
+ */ +void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep); + softleaf_t entry; pte_t pte; + hugetlb_vma_assert_locked(vma); spin_lock(ptl); - pte = huge_ptep_get(ptep); + pte = huge_ptep_get(vma->vm_mm, addr, ptep); - if (unlikely(!is_hugetlb_entry_migration(pte))) - spin_unlock(ptl); - else - migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl); -} + if (huge_pte_none(pte)) + goto fail; -void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) -{ - spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte); + entry = softleaf_from_pte(pte); + if (softleaf_is_migration(entry)) { + /* + * If migration entry existed, safe to release vma lock + * here because the pgtable page won't be freed without the + * pgtable lock released. See comment right above pgtable + * lock release in migration_entry_wait_on_locked(). + */ + hugetlb_vma_unlock_read(vma); + migration_entry_wait_on_locked(entry, ptl); + return; + } - __migration_entry_wait_huge(pte, ptl); +fail: + spin_unlock(ptl); + hugetlb_vma_unlock_read(vma); } #endif @@ -356,112 +550,120 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) spinlock_t *ptl; ptl = pmd_lock(mm, pmd); - if (!is_pmd_migration_entry(*pmd)) + if (!pmd_is_migration_entry(*pmd)) goto unlock; - migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); + migration_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl); return; unlock: spin_unlock(ptl); } #endif -static int folio_expected_refs(struct address_space *mapping, - struct folio *folio) -{ - int refs = 1; - if (!mapping) - return refs; - - refs += folio_nr_pages(folio); - if (folio_test_private(folio)) - refs++; - - return refs; -} - /* - * Replace the page in the mapping. + * Replace the folio in the mapping. * * The number of remaining references must be: - * 1 for anonymous pages without a mapping - * 2 for pages with a mapping - * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. + * 1 for anonymous folios without a mapping + * 2 for folios with a mapping + * 3 for folios with a mapping and the private flag set. 
*/ -int folio_migrate_mapping(struct address_space *mapping, - struct folio *newfolio, struct folio *folio, int extra_count) +static int __folio_migrate_mapping(struct address_space *mapping, + struct folio *newfolio, struct folio *folio, int expected_count) { - XA_STATE(xas, &mapping->i_pages, folio_index(folio)); + XA_STATE(xas, &mapping->i_pages, folio->index); + struct swap_cluster_info *ci = NULL; struct zone *oldzone, *newzone; int dirty; - int expected_count = folio_expected_refs(mapping, folio) + extra_count; long nr = folio_nr_pages(folio); if (!mapping) { - /* Anonymous page without mapping */ - if (folio_ref_count(folio) != expected_count) - return -EAGAIN; + /* Take off deferred split queue while frozen and memcg set */ + if (folio_test_large(folio) && + folio_test_large_rmappable(folio)) { + if (!folio_ref_freeze(folio, expected_count)) + return -EAGAIN; + folio_unqueue_deferred_split(folio); + folio_ref_unfreeze(folio, expected_count); + } /* No turning back from here */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); - return MIGRATEPAGE_SUCCESS; + return 0; } oldzone = folio_zone(folio); newzone = folio_zone(newfolio); - xas_lock_irq(&xas); + if (folio_test_swapcache(folio)) + ci = swap_cluster_get_and_lock_irq(folio); + else + xas_lock_irq(&xas); + if (!folio_ref_freeze(folio, expected_count)) { - xas_unlock_irq(&xas); + if (ci) + swap_cluster_unlock_irq(ci); + else + xas_unlock_irq(&xas); return -EAGAIN; } + /* Take off deferred split queue while frozen and memcg set */ + folio_unqueue_deferred_split(folio); + /* * Now we know that no one else is looking at the folio: * no turning back from here. */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); folio_ref_add(newfolio, nr); /* add cache reference */ - if (folio_test_swapbacked(folio)) { + if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); - if (folio_test_swapcache(folio)) { - folio_set_swapcache(newfolio); - newfolio->private = folio_get_private(folio); - } - } else { - VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); + if (folio_test_swapcache(folio)) { + folio_set_swapcache(newfolio); + newfolio->private = folio_get_private(folio); } - /* Move dirty while page refs frozen and newpage not yet exposed */ + /* Move dirty while folio refs frozen and newfolio not yet exposed */ dirty = folio_test_dirty(folio); if (dirty) { folio_clear_dirty(folio); folio_set_dirty(newfolio); } - xas_store(&xas, newfolio); + if (folio_test_swapcache(folio)) + __swap_cache_replace_folio(ci, folio, newfolio); + else + xas_store(&xas, newfolio); /* - * Drop cache reference from old page by unfreezing + * Drop cache reference from old folio by unfreezing * to one less reference. * We know this isn't the last reference. */ folio_ref_unfreeze(folio, expected_count - nr); - xas_unlock(&xas); /* Leave irq disabled to prevent preemption while updating stats */ + if (ci) + swap_cluster_unlock(ci); + else + xas_unlock(&xas); /* * If moved to a different zone then also account - * the page for that zone. Other VM counters will be + * the folio for that zone. Other VM counters will be * taken care of when we establish references to the - * new page and drop references to the old page. 
+ * new folio and drop references to the old folio. * - * Note that anonymous pages are accounted for + * Note that anonymous folios are accounted for * via NR_FILE_PAGES and NR_ANON_MAPPED if they * are mapped to swap space. */ @@ -473,28 +675,44 @@ int folio_migrate_mapping(struct address_space *mapping, old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); - __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); - __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); + mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); + mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { - __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); - __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); + mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); + mod_lruvec_state(new_lruvec, NR_SHMEM, nr); + + if (folio_test_pmd_mappable(folio)) { + mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr); + mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr); + } } #ifdef CONFIG_SWAP if (folio_test_swapcache(folio)) { - __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); - __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); + mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); + mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); } #endif if (dirty && mapping_can_writeback(mapping)) { - __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); + mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); - __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); + mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); } } local_irq_enable(); - return MIGRATEPAGE_SUCCESS; + return 0; +} + +int folio_migrate_mapping(struct address_space *mapping, + struct folio *newfolio, struct folio *folio, int extra_count) +{ + int expected_count = folio_expected_ref_count(folio) + extra_count + 1; + + if (folio_ref_count(folio) != expected_count) + return -EAGAIN; + + return __folio_migrate_mapping(mapping, newfolio, folio, expected_count); } EXPORT_SYMBOL(folio_migrate_mapping); @@ -505,11 +723,17 @@ EXPORT_SYMBOL(folio_migrate_mapping); int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src) { - XA_STATE(xas, &mapping->i_pages, folio_index(src)); - int expected_count; + XA_STATE(xas, &mapping->i_pages, src->index); + int rc, expected_count = folio_expected_ref_count(src) + 1; + + if (folio_ref_count(src) != expected_count) + return -EAGAIN; + + rc = folio_mc_copy(dst, src); + if (unlikely(rc)) + return rc; xas_lock_irq(&xas); - expected_count = 2 + folio_has_private(src); if (!folio_ref_freeze(src, expected_count)) { xas_unlock_irq(&xas); return -EAGAIN; @@ -518,15 +742,15 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, dst->index = src->index; dst->mapping = src->mapping; - folio_get(dst); + folio_ref_add(dst, folio_nr_pages(dst)); xas_store(&xas, dst); - folio_ref_unfreeze(src, expected_count - 1); + folio_ref_unfreeze(src, expected_count - folio_nr_pages(src)); xas_unlock_irq(&xas); - return MIGRATEPAGE_SUCCESS; + return 0; } /* @@ -536,8 +760,6 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) { int cpupid; - if (folio_test_error(folio)) - folio_set_error(newfolio); if (folio_test_referenced(folio)) folio_set_referenced(newfolio); if (folio_test_uptodate(folio)) @@ -569,29 +791,31 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) if 
(folio_test_idle(folio)) folio_set_idle(newfolio); + folio_migrate_refs(newfolio, folio); /* * Copy NUMA information to the new page, to prevent over-eager * future migrations of this same page. */ - cpupid = page_cpupid_xchg_last(&folio->page, -1); + cpupid = folio_xchg_last_cpupid(folio, -1); /* * For memory tiering mode, when migrate between slow and fast * memory node, reset cpupid, because that is used to record * page access time in slow memory node. */ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) { - bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); - bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page)); + bool f_toptier = node_is_toptier(folio_nid(folio)); + bool t_toptier = node_is_toptier(folio_nid(newfolio)); if (f_toptier != t_toptier) cpupid = -1; } - page_cpupid_xchg_last(&newfolio->page, cpupid); + folio_xchg_last_cpupid(newfolio, cpupid); folio_migrate_ksm(newfolio, folio); /* * Please do not reorder this without considering how mm/ksm.c's - * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). + * ksm_get_folio() depends upon ksm_migrate_page() and the + * swapcache flag. */ if (folio_test_swapcache(folio)) folio_clear_swapcache(folio); @@ -617,40 +841,39 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) folio_set_readahead(newfolio); folio_copy_owner(newfolio, folio); + pgalloc_tag_swap(newfolio, folio); - if (!folio_test_hugetlb(folio)) - mem_cgroup_migrate(folio, newfolio); + mem_cgroup_migrate(folio, newfolio); } EXPORT_SYMBOL(folio_migrate_flags); -void folio_migrate_copy(struct folio *newfolio, struct folio *folio) -{ - folio_copy(newfolio, folio); - folio_migrate_flags(newfolio, folio); -} -EXPORT_SYMBOL(folio_migrate_copy); - /************************************************************ * Migration functions ***********************************************************/ -int migrate_folio_extra(struct address_space *mapping, struct folio *dst, - struct folio *src, enum migrate_mode mode, int extra_count) +static int __migrate_folio(struct address_space *mapping, struct folio *dst, + struct folio *src, void *src_private, + enum migrate_mode mode) { - int rc; + int rc, expected_count = folio_expected_ref_count(src) + 1; - BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ + /* Check whether src does not have extra refs before we do more work */ + if (folio_ref_count(src) != expected_count) + return -EAGAIN; - rc = folio_migrate_mapping(mapping, dst, src, extra_count); + rc = folio_mc_copy(dst, src); + if (unlikely(rc)) + return rc; - if (rc != MIGRATEPAGE_SUCCESS) + rc = __folio_migrate_mapping(mapping, dst, src, expected_count); + if (rc) return rc; - if (mode != MIGRATE_SYNC_NO_COPY) - folio_migrate_copy(dst, src); - else - folio_migrate_flags(dst, src); - return MIGRATEPAGE_SUCCESS; + if (src_private) + folio_attach_private(dst, folio_detach_private(src)); + + folio_migrate_flags(dst, src); + return 0; } /** @@ -661,54 +884,50 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst, * @mode: How to migrate the page. * * Common logic to directly migrate a single LRU folio suitable for - * folios that do not use PagePrivate/PagePrivate2. + * folios that do not have private data. * * Folios are locked upon entry and exit. 
*/ int migrate_folio(struct address_space *mapping, struct folio *dst, - struct folio *src, enum migrate_mode mode) + struct folio *src, enum migrate_mode mode) { - return migrate_folio_extra(mapping, dst, src, mode, 0); + BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ + return __migrate_folio(mapping, dst, src, NULL, mode); } EXPORT_SYMBOL(migrate_folio); -#ifdef CONFIG_BLOCK +#ifdef CONFIG_BUFFER_HEAD /* Returns true if all buffers are successfully locked */ static bool buffer_migrate_lock_buffers(struct buffer_head *head, enum migrate_mode mode) { struct buffer_head *bh = head; + struct buffer_head *failed_bh; - /* Simple case, sync compaction */ - if (mode != MIGRATE_ASYNC) { - do { - lock_buffer(bh); - bh = bh->b_this_page; - - } while (bh != head); - - return true; - } - - /* async case, we cannot block on lock_buffer so use trylock_buffer */ do { if (!trylock_buffer(bh)) { - /* - * We failed to lock the buffer and cannot stall in - * async migration. Release the taken locks - */ - struct buffer_head *failed_bh = bh; - bh = head; - while (bh != failed_bh) { - unlock_buffer(bh); - bh = bh->b_this_page; - } - return false; + if (mode == MIGRATE_ASYNC) + goto unlock; + if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh)) + goto unlock; + lock_buffer(bh); } bh = bh->b_this_page; } while (bh != head); + return true; + +unlock: + /* We failed to lock the buffer and cannot stall. */ + failed_bh = bh; + bh = head; + while (bh != failed_bh) { + unlock_buffer(bh); + bh = bh->b_this_page; + } + + return false; } static int __buffer_migrate_folio(struct address_space *mapping, @@ -724,7 +943,7 @@ static int __buffer_migrate_folio(struct address_space *mapping, return migrate_folio(mapping, dst, src, mode); /* Check whether page does not have extra refs before we do more work */ - expected_count = folio_expected_refs(mapping, src); + expected_count = folio_expected_ref_count(src) + 1; if (folio_ref_count(src) != expected_count) return -EAGAIN; @@ -732,12 +951,14 @@ static int __buffer_migrate_folio(struct address_space *mapping, return -EAGAIN; if (check_refs) { - bool busy; + bool busy, migrating; bool invalidated = false; + migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state); + VM_WARN_ON_ONCE(migrating); recheck_buffers: busy = false; - spin_lock(&mapping->private_lock); + spin_lock(&mapping->i_private_lock); bh = head; do { if (atomic_read(&bh->b_count)) { @@ -746,39 +967,31 @@ recheck_buffers: } bh = bh->b_this_page; } while (bh != head); + spin_unlock(&mapping->i_private_lock); if (busy) { if (invalidated) { rc = -EAGAIN; goto unlock_buffers; } - spin_unlock(&mapping->private_lock); invalidate_bh_lrus(); invalidated = true; goto recheck_buffers; } } - rc = folio_migrate_mapping(mapping, dst, src, 0); - if (rc != MIGRATEPAGE_SUCCESS) + rc = filemap_migrate_folio(mapping, dst, src, mode); + if (rc) goto unlock_buffers; - folio_attach_private(dst, folio_detach_private(src)); - bh = head; do { - set_bh_page(bh, &dst->page, bh_offset(bh)); + folio_set_bh(bh, dst, bh_offset(bh)); bh = bh->b_this_page; } while (bh != head); - if (mode != MIGRATE_SYNC_NO_COPY) - folio_migrate_copy(dst, src); - else - folio_migrate_flags(dst, src); - - rc = MIGRATEPAGE_SUCCESS; unlock_buffers: if (check_refs) - spin_unlock(&mapping->private_lock); + clear_bit_unlock(BH_Migrate, &head->b_state); bh = head; do { unlock_buffer(bh); @@ -830,195 +1043,183 @@ int buffer_migrate_folio_norefs(struct address_space *mapping, return __buffer_migrate_folio(mapping, dst, src, mode, true); } 
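/*
 * Editor's illustrative sketch, not part of this patch: a filesystem whose
 * folios carry no private data can wire the generic helper straight into
 * its address_space_operations. "myfs" is a made-up name; migrate_folio()
 * and filemap_dirty_folio() are existing helpers.
 */
static const struct address_space_operations myfs_aops = {
        .dirty_folio    = filemap_dirty_folio,
        .migrate_folio  = migrate_folio,
};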
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs); -#endif +#endif /* CONFIG_BUFFER_HEAD */ int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { - int ret; - - ret = folio_migrate_mapping(mapping, dst, src, 0); - if (ret != MIGRATEPAGE_SUCCESS) - return ret; - - if (folio_get_private(src)) - folio_attach_private(dst, folio_detach_private(src)); - - if (mode != MIGRATE_SYNC_NO_COPY) - folio_migrate_copy(dst, src); - else - folio_migrate_flags(dst, src); - return MIGRATEPAGE_SUCCESS; + return __migrate_folio(mapping, dst, src, folio_get_private(src), mode); } EXPORT_SYMBOL_GPL(filemap_migrate_folio); /* - * Writeback a folio to clean the dirty state - */ -static int writeout(struct address_space *mapping, struct folio *folio) -{ - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .nr_to_write = 1, - .range_start = 0, - .range_end = LLONG_MAX, - .for_reclaim = 1 - }; - int rc; - - if (!mapping->a_ops->writepage) - /* No write method for the address space */ - return -EINVAL; - - if (!folio_clear_dirty_for_io(folio)) - /* Someone else already triggered a write */ - return -EAGAIN; - - /* - * A dirty folio may imply that the underlying filesystem has - * the folio on some queue. So the folio must be clean for - * migration. Writeout may mean we lose the lock and the - * folio state is no longer what we checked for earlier. - * At this point we know that the migration attempt cannot - * be successful. - */ - remove_migration_ptes(folio, folio, false); - - rc = mapping->a_ops->writepage(&folio->page, &wbc); - - if (rc != AOP_WRITEPAGE_ACTIVATE) - /* unlocked. Relock */ - folio_lock(folio); - - return (rc < 0) ? -EIO : -EAGAIN; -} - -/* * Default handling if a filesystem does not provide a migration function. */ static int fallback_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { - if (folio_test_dirty(src)) { - /* Only writeback folios in full synchronous migration */ - switch (mode) { - case MIGRATE_SYNC: - case MIGRATE_SYNC_NO_COPY: - break; - default: - return -EBUSY; - } - return writeout(mapping, src); - } + WARN_ONCE(mapping->a_ops->writepages, + "%ps does not implement migrate_folio\n", + mapping->a_ops); + if (folio_test_dirty(src)) + return -EBUSY; /* - * Buffers may be managed in a filesystem specific way. - * We must have no buffers or drop them. + * Filesystem may have private data at folio->private that we + * can't migrate automatically. */ - if (folio_test_private(src) && - !filemap_release_folio(src, GFP_KERNEL)) + if (!filemap_release_folio(src, GFP_KERNEL)) return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; return migrate_folio(mapping, dst, src, mode); } /* - * Move a page to a newly allocated page - * The page is locked and all ptes have been successfully removed. + * Move a src folio to a newly allocated dst folio. + * + * The src and dst folios are locked and the src folios was unmapped from + * the page tables. * - * The new page will have replaced the old page if this function - * is successful. + * On success, the src folio was replaced by the dst folio. 
* * Return value: * < 0 - error code - * MIGRATEPAGE_SUCCESS - success + * 0 - success */ static int move_to_new_folio(struct folio *dst, struct folio *src, enum migrate_mode mode) { + struct address_space *mapping = folio_mapping(src); int rc = -EAGAIN; - bool is_lru = !__PageMovable(&src->page); VM_BUG_ON_FOLIO(!folio_test_locked(src), src); VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); - if (likely(is_lru)) { - struct address_space *mapping = folio_mapping(src); - - if (!mapping) - rc = migrate_folio(mapping, dst, src, mode); - else if (mapping->a_ops->migrate_folio) - /* - * Most folios have a mapping and most filesystems - * provide a migrate_folio callback. Anonymous folios - * are part of swap space which also has its own - * migrate_folio callback. This is the most common path - * for page migration. - */ - rc = mapping->a_ops->migrate_folio(mapping, dst, src, - mode); - else - rc = fallback_migrate_folio(mapping, dst, src, mode); - } else { - const struct movable_operations *mops; - + if (!mapping) + rc = migrate_folio(mapping, dst, src, mode); + else if (mapping_inaccessible(mapping)) + rc = -EOPNOTSUPP; + else if (mapping->a_ops->migrate_folio) /* - * In case of non-lru page, it could be released after - * isolation step. In that case, we shouldn't try migration. + * Most folios have a mapping and most filesystems + * provide a migrate_folio callback. Anonymous folios + * are part of swap space which also has its own + * migrate_folio callback. This is the most common path + * for page migration. */ - VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); - if (!folio_test_movable(src)) { - rc = MIGRATEPAGE_SUCCESS; - folio_clear_isolated(src); - goto out; - } - - mops = page_movable_ops(&src->page); - rc = mops->migrate_page(&dst->page, &src->page, mode); - WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && - !folio_test_isolated(src)); - } - - /* - * When successful, old pagecache src->mapping must be cleared before - * src is freed; but stats require that PageAnon be left as PageAnon. - */ - if (rc == MIGRATEPAGE_SUCCESS) { - if (__PageMovable(&src->page)) { - VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); - - /* - * We clear PG_movable under page_lock so any compactor - * cannot try to migrate this page. - */ - folio_clear_isolated(src); - } + rc = mapping->a_ops->migrate_folio(mapping, dst, src, + mode); + else + rc = fallback_migrate_folio(mapping, dst, src, mode); + if (!rc) { /* - * Anonymous and movable src->mapping will be cleared by - * free_pages_prepare so don't reset it here for keeping - * the type to work PageAnon, for example. + * For pagecache folios, src->mapping must be cleared before src + * is freed. Anonymous folios must stay anonymous until freed. */ - if (!folio_mapping_flags(src)) + if (!folio_test_anon(src)) src->mapping = NULL; if (likely(!folio_is_zone_device(dst))) flush_dcache_folio(dst); } -out: return rc; } -static int __unmap_and_move(struct folio *src, struct folio *dst, - int force, enum migrate_mode mode) +/* + * To record some information during migration, we use unused private + * field of struct folio of the newly allocated destination folio. + * This is safe because nobody is using it except us. 
+ */ +enum { + PAGE_WAS_MAPPED = BIT(0), + PAGE_WAS_MLOCKED = BIT(1), + PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED, +}; + +static void __migrate_folio_record(struct folio *dst, + int old_page_state, + struct anon_vma *anon_vma) +{ + dst->private = (void *)anon_vma + old_page_state; +} + +static void __migrate_folio_extract(struct folio *dst, + int *old_page_state, + struct anon_vma **anon_vmap) +{ + unsigned long private = (unsigned long)dst->private; + + *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES); + *old_page_state = private & PAGE_OLD_STATES; + dst->private = NULL; +} + +/* Restore the source folio to the original state upon failure */ +static void migrate_folio_undo_src(struct folio *src, + int page_was_mapped, + struct anon_vma *anon_vma, + bool locked, + struct list_head *ret) +{ + if (page_was_mapped) + remove_migration_ptes(src, src, 0); + /* Drop an anon_vma reference if we took one */ + if (anon_vma) + put_anon_vma(anon_vma); + if (locked) + folio_unlock(src); + if (ret) + list_move_tail(&src->lru, ret); +} + +/* Restore the destination folio to the original state upon failure */ +static void migrate_folio_undo_dst(struct folio *dst, bool locked, + free_folio_t put_new_folio, unsigned long private) +{ + if (locked) + folio_unlock(dst); + if (put_new_folio) + put_new_folio(dst, private); + else + folio_put(dst); +} + +/* Cleanup src folio upon migration success */ +static void migrate_folio_done(struct folio *src, + enum migrate_reason reason) +{ + if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION) + mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON + + folio_is_file_lru(src), -folio_nr_pages(src)); + + if (reason != MR_MEMORY_FAILURE) + /* We release the page in page_handle_poison. */ + folio_put(src); +} + +/* Obtain the lock on page, remove all ptes. */ +static int migrate_folio_unmap(new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + struct folio *src, struct folio **dstp, enum migrate_mode mode, + struct list_head *ret) { + struct folio *dst; int rc = -EAGAIN; - bool page_was_mapped = false; + int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__PageMovable(&src->page); + bool locked = false; + bool dst_locked = false; + + dst = get_new_folio(src, private); + if (!dst) + return -ENOMEM; + *dstp = dst; + + dst->private = NULL; if (!folio_trylock(src)) { - if (!force || mode == MIGRATE_ASYNC) + if (mode == MIGRATE_ASYNC) goto out; /* @@ -1037,8 +1238,19 @@ static int __unmap_and_move(struct folio *src, struct folio *dst, if (current->flags & PF_MEMALLOC) goto out; + /* + * In "light" mode, we can wait for transient locks (eg + * inserting a page into the page table), but it's not + * worth waiting for I/O. + */ + if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src)) + goto out; + folio_lock(src); } + locked = true; + if (folio_test_mlocked(src)) + old_page_state |= PAGE_WAS_MLOCKED; if (folio_test_writeback(src)) { /* @@ -1049,14 +1261,11 @@ static int __unmap_and_move(struct folio *src, struct folio *dst, */ switch (mode) { case MIGRATE_SYNC: - case MIGRATE_SYNC_NO_COPY: break; default: rc = -EBUSY; - goto out_unlock; + goto out; } - if (!force) - goto out_unlock; folio_wait_writeback(src); } @@ -1086,11 +1295,12 @@ static int __unmap_and_move(struct folio *src, struct folio *dst, * This is much like races on refcount of oldpage: just don't BUG(). 
*/ if (unlikely(!folio_trylock(dst))) - goto out_unlock; + goto out; + dst_locked = true; - if (unlikely(!is_lru)) { - rc = move_to_new_folio(dst, src, mode); - goto out_unlock_both; + if (unlikely(page_has_movable_ops(&src->page))) { + __migrate_folio_record(dst, old_page_state, anon_vma); + return 0; } /* @@ -1108,18 +1318,61 @@ static int __unmap_and_move(struct folio *src, struct folio *dst, if (!src->mapping) { if (folio_test_private(src)) { try_to_free_buffers(src); - goto out_unlock_both; + goto out; } } else if (folio_mapped(src)) { /* Establish migration ptes */ VM_BUG_ON_FOLIO(folio_test_anon(src) && !folio_test_ksm(src) && !anon_vma, src); - try_to_migrate(src, 0); - page_was_mapped = true; + try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0); + old_page_state |= PAGE_WAS_MAPPED; } - if (!folio_mapped(src)) - rc = move_to_new_folio(dst, src, mode); + if (!folio_mapped(src)) { + __migrate_folio_record(dst, old_page_state, anon_vma); + return 0; + } + +out: + /* + * A folio that has not been unmapped will be restored to + * right list unless we want to retry. + */ + if (rc == -EAGAIN) + ret = NULL; + + migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED, + anon_vma, locked, ret); + migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private); + + return rc; +} + +/* Migrate the folio to the newly allocated folio in dst. */ +static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, + struct folio *src, struct folio *dst, + enum migrate_mode mode, enum migrate_reason reason, + struct list_head *ret) +{ + int rc; + int old_page_state = 0; + struct anon_vma *anon_vma = NULL; + struct list_head *prev; + + __migrate_folio_extract(dst, &old_page_state, &anon_vma); + prev = dst->lru.prev; + list_del(&dst->lru); + + if (unlikely(page_has_movable_ops(&src->page))) { + rc = migrate_movable_ops_page(&dst->page, &src->page, mode); + if (rc) + goto out; + goto out_unlock_both; + } + + rc = move_to_new_folio(dst, src, mode); + if (rc) + goto out; /* * When successful, push dst to LRU immediately: so that if it @@ -1130,111 +1383,50 @@ static int __unmap_and_move(struct folio *src, struct folio *dst, * unsuccessful, and other cases when a page has been temporarily * isolated from the unevictable LRU: but this case is the easiest. */ - if (rc == MIGRATEPAGE_SUCCESS) { - folio_add_lru(dst); - if (page_was_mapped) - lru_add_drain(); - } + folio_add_lru(dst); + if (old_page_state & PAGE_WAS_MLOCKED) + lru_add_drain(); - if (page_was_mapped) - remove_migration_ptes(src, - rc == MIGRATEPAGE_SUCCESS ? dst : src, false); + if (old_page_state & PAGE_WAS_MAPPED) + remove_migration_ptes(src, dst, 0); out_unlock_both: folio_unlock(dst); -out_unlock: - /* Drop an anon_vma reference if we took one */ - if (anon_vma) - put_anon_vma(anon_vma); - folio_unlock(src); -out: + folio_set_owner_migrate_reason(dst, reason); /* * If migration is successful, decrease refcount of dst, * which will not free the page because new page owner increased * refcounter. */ - if (rc == MIGRATEPAGE_SUCCESS) - folio_put(dst); - - return rc; -} - -/* - * Obtain the lock on folio, remove all ptes and migrate the folio - * to the newly allocated folio in dst. 
- */ -static int unmap_and_move(new_page_t get_new_page, - free_page_t put_new_page, - unsigned long private, struct folio *src, - int force, enum migrate_mode mode, - enum migrate_reason reason, - struct list_head *ret) -{ - struct folio *dst; - int rc = MIGRATEPAGE_SUCCESS; - struct page *newpage = NULL; + folio_put(dst); - if (!thp_migration_supported() && folio_test_transhuge(src)) - return -ENOSYS; - - if (folio_ref_count(src) == 1) { - /* Folio was freed from under us. So we are done. */ - folio_clear_active(src); - folio_clear_unevictable(src); - /* free_pages_prepare() will clear PG_isolated. */ - goto out; - } - - newpage = get_new_page(&src->page, private); - if (!newpage) - return -ENOMEM; - dst = page_folio(newpage); - - dst->private = NULL; - rc = __unmap_and_move(src, dst, force, mode); - if (rc == MIGRATEPAGE_SUCCESS) - set_page_owner_migrate_reason(&dst->page, reason); + /* + * A folio that has been migrated has all references removed + * and will be freed. + */ + list_del(&src->lru); + /* Drop an anon_vma reference if we took one */ + if (anon_vma) + put_anon_vma(anon_vma); + folio_unlock(src); + migrate_folio_done(src, reason); + return rc; out: - if (rc != -EAGAIN) { - /* - * A folio that has been migrated has all references - * removed and will be freed. A folio that has not been - * migrated will have kept its references and be restored. - */ - list_del(&src->lru); - } - /* - * If migration is successful, releases reference grabbed during - * isolation. Otherwise, restore the folio to right list unless - * we want to retry. + * A folio that has not been migrated will be restored to + * right list unless we want to retry. */ - if (rc == MIGRATEPAGE_SUCCESS) { - /* - * Compaction can migrate also non-LRU folios which are - * not accounted to NR_ISOLATED_*. They can be recognized - * as __folio_test_movable - */ - if (likely(!__folio_test_movable(src))) - mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON + - folio_is_file_lru(src), -folio_nr_pages(src)); - - if (reason != MR_MEMORY_FAILURE) - /* - * We release the folio in page_handle_poison. - */ - folio_put(src); - } else { - if (rc != -EAGAIN) - list_add_tail(&src->lru, ret); - - if (put_new_page) - put_new_page(&dst->page, private); - else - folio_put(dst); + if (rc == -EAGAIN) { + list_add(&dst->lru, prev); + __migrate_folio_record(dst, old_page_state, anon_vma); + return rc; } + migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED, + anon_vma, true, ret); + migrate_folio_undo_dst(dst, true, put_new_folio, private); + return rc; } @@ -1256,46 +1448,32 @@ out: * because then pte is replaced with migration swap entry and direct I/O code * will wait in the page fault for migration to complete. */ -static int unmap_and_move_huge_page(new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - struct page *hpage, int force, - enum migrate_mode mode, int reason, - struct list_head *ret) +static int unmap_and_move_huge_page(new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + struct folio *src, int force, enum migrate_mode mode, + int reason, struct list_head *ret) { - struct folio *dst, *src = page_folio(hpage); + struct folio *dst; int rc = -EAGAIN; int page_was_mapped = 0; - struct page *new_hpage; struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; - /* - * Migratability of hugepages depends on architectures and their size. 
- * This check is necessary because some callers of hugepage migration - * like soft offline and memory hotremove don't walk through page - * tables or check whether the hugepage is pmd-based or not before - * kicking migration. - */ - if (!hugepage_migration_supported(page_hstate(hpage))) - return -ENOSYS; - if (folio_ref_count(src) == 1) { /* page was freed from under us. So we are done. */ - putback_active_hugepage(hpage); - return MIGRATEPAGE_SUCCESS; + folio_putback_hugetlb(src); + return 0; } - new_hpage = get_new_page(hpage, private); - if (!new_hpage) + dst = get_new_folio(src, private); + if (!dst) return -ENOMEM; - dst = page_folio(new_hpage); if (!folio_trylock(src)) { if (!force) goto out; switch (mode) { case MIGRATE_SYNC: - case MIGRATE_SYNC_NO_COPY: break; default: goto out; @@ -1329,7 +1507,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, * semaphore in write mode here and set TTU_RMAP_LOCKED * to let lower levels know we have taken the lock. */ - mapping = hugetlb_page_mapping_lock_write(hpage); + mapping = hugetlb_folio_mapping_lock_write(src); if (unlikely(!mapping)) goto unlock_put_anon; @@ -1347,8 +1525,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, rc = move_to_new_folio(dst, src, mode); if (page_was_mapped) - remove_migration_ptes(src, - rc == MIGRATEPAGE_SUCCESS ? dst : src, false); + remove_migration_ptes(src, !rc ? dst : src, 0); unlock_put_anon: folio_unlock(dst); @@ -1357,37 +1534,43 @@ put_anon: if (anon_vma) put_anon_vma(anon_vma); - if (rc == MIGRATEPAGE_SUCCESS) { + if (!rc) { move_hugetlb_state(src, dst, reason); - put_new_page = NULL; + put_new_folio = NULL; } out_unlock: folio_unlock(src); out: - if (rc == MIGRATEPAGE_SUCCESS) - putback_active_hugepage(hpage); + if (!rc) + folio_putback_hugetlb(src); else if (rc != -EAGAIN) list_move_tail(&src->lru, ret); /* - * If migration was not successful and there's a freeing callback, use - * it. Otherwise, put_page() will drop the reference grabbed during - * isolation. + * If migration was not successful and there's a freeing callback, + * return the folio to that special allocator. Otherwise, simply drop + * our additional reference. */ - if (put_new_page) - put_new_page(new_hpage, private); + if (put_new_folio) + put_new_folio(dst, private); else - putback_active_hugepage(new_hpage); + folio_put(dst); return rc; } -static inline int try_split_folio(struct folio *folio, struct list_head *split_folios) +static inline int try_split_folio(struct folio *folio, struct list_head *split_folios, + enum migrate_mode mode) { int rc; - folio_lock(folio); + if (mode == MIGRATE_ASYNC) { + if (!folio_trylock(folio)) + return -EAGAIN; + } else { + folio_lock(folio); + } rc = split_folio_to_list(folio, split_folios); folio_unlock(folio); if (!rc) @@ -1396,101 +1579,279 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f return rc; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR +#else +#define NR_MAX_BATCHED_MIGRATION 512 +#endif +#define NR_MAX_MIGRATE_PAGES_RETRY 10 +#define NR_MAX_MIGRATE_ASYNC_RETRY 3 +#define NR_MAX_MIGRATE_SYNC_RETRY \ + (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY) + +struct migrate_pages_stats { + int nr_succeeded; /* Normal and large folios migrated successfully, in + units of base pages */ + int nr_failed_pages; /* Normal and large folios failed to be migrated, in + units of base pages. 
Untried folios aren't counted */ + int nr_thp_succeeded; /* THP migrated successfully */ + int nr_thp_failed; /* THP failed to be migrated */ + int nr_thp_split; /* THP split before migrating */ + int nr_split; /* Large folio (include THP) split before migrating */ +}; + /* - * migrate_pages - migrate the folios specified in a list, to the free folios - * supplied as the target for the page migration - * - * @from: The list of folios to be migrated. - * @get_new_page: The function used to allocate free folios to be used - * as the target of the folio migration. - * @put_new_page: The function used to free target folios if migration - * fails, or NULL if no special handling is necessary. - * @private: Private data to be passed on to get_new_page() - * @mode: The migration mode that specifies the constraints for - * folio migration, if any. - * @reason: The reason for folio migration. - * @ret_succeeded: Set to the number of folios migrated successfully if - * the caller passes a non-NULL pointer. - * - * The function returns after 10 attempts or if no folios are movable any more - * because the list has become empty or no retryable folios exist any more. - * It is caller's responsibility to call putback_movable_pages() to return folios - * to the LRU or free list only if ret != 0. - * - * Returns the number of {normal folio, large folio, hugetlb} that were not - * migrated, or an error code. The number of large folio splits will be - * considered as the number of non-migrated large folio, no matter how many - * split folios of the large folio are migrated successfully. + * Returns the number of hugetlb folios that were not migrated, or an error code + * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable + * any more because the list has become empty or no retryable hugetlb folios + * exist any more. It is caller's responsibility to call putback_movable_pages() + * only if ret != 0. */ -int migrate_pages(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - enum migrate_mode mode, int reason, unsigned int *ret_succeeded) +static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + enum migrate_mode mode, int reason, + struct migrate_pages_stats *stats, + struct list_head *ret_folios) { int retry = 1; - int large_retry = 1; - int thp_retry = 1; int nr_failed = 0; - int nr_failed_pages = 0; int nr_retry_pages = 0; - int nr_succeeded = 0; - int nr_thp_succeeded = 0; - int nr_large_failed = 0; - int nr_thp_failed = 0; - int nr_thp_split = 0; int pass = 0; - bool is_large = false; - bool is_thp = false; struct folio *folio, *folio2; int rc, nr_pages; - LIST_HEAD(ret_folios); - LIST_HEAD(split_folios); - bool nosplit = (reason == MR_NUMA_MISPLACED); - bool no_split_folio_counting = false; - trace_mm_migrate_pages_start(mode, reason); - -split_folio_migration: - for (pass = 0; pass < 10 && (retry || large_retry); pass++) { + for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) { retry = 0; - large_retry = 0; - thp_retry = 0; nr_retry_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { - /* - * Large folio statistics is based on the source large - * folio. Capture required information that might get - * lost during migration. 
- */ - is_large = folio_test_large(folio) && !folio_test_hugetlb(folio); - is_thp = is_large && folio_test_pmd_mappable(folio); + if (!folio_test_hugetlb(folio)) + continue; + nr_pages = folio_nr_pages(folio); + cond_resched(); - if (folio_test_hugetlb(folio)) - rc = unmap_and_move_huge_page(get_new_page, - put_new_page, private, - &folio->page, pass > 2, mode, - reason, - &ret_folios); - else - rc = unmap_and_move(get_new_page, put_new_page, - private, folio, pass > 2, mode, - reason, &ret_folios); + /* + * Migratability of hugepages depends on architectures and + * their size. This check is necessary because some callers + * of hugepage migration like soft offline and memory + * hotremove don't walk through page tables or check whether + * the hugepage is pmd-based or not before kicking migration. + */ + if (!hugepage_migration_supported(folio_hstate(folio))) { + nr_failed++; + stats->nr_failed_pages += nr_pages; + list_move_tail(&folio->lru, ret_folios); + continue; + } + + rc = unmap_and_move_huge_page(get_new_folio, + put_new_folio, private, + folio, pass > 2, mode, + reason, ret_folios); /* * The rules are: - * Success: non hugetlb folio will be freed, hugetlb - * folio will be put back + * 0: hugetlb folio will be put back * -EAGAIN: stay on the from list * -ENOMEM: stay on the from list - * -ENOSYS: stay on the from list - * Other errno: put on ret_folios list then splice to - * from list + * Other errno: put on ret_folios list */ switch(rc) { + case -ENOMEM: + /* + * When memory is low, don't bother to try to migrate + * other folios, just exit. + */ + stats->nr_failed_pages += nr_pages + nr_retry_pages; + return -ENOMEM; + case -EAGAIN: + retry++; + nr_retry_pages += nr_pages; + break; + case 0: + stats->nr_succeeded += nr_pages; + break; + default: + /* + * Permanent failure (-EBUSY, etc.): + * unlike -EAGAIN case, the failed folio is + * removed from migration folio list and not + * retried in the next outer loop. + */ + nr_failed++; + stats->nr_failed_pages += nr_pages; + break; + } + } + } + /* + * nr_failed is number of hugetlb folios failed to be migrated. After + * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb + * folios as failed. 
+ */ + nr_failed += retry; + stats->nr_failed_pages += nr_retry_pages; + + return nr_failed; +} + +static void migrate_folios_move(struct list_head *src_folios, + struct list_head *dst_folios, + free_folio_t put_new_folio, unsigned long private, + enum migrate_mode mode, int reason, + struct list_head *ret_folios, + struct migrate_pages_stats *stats, + int *retry, int *thp_retry, int *nr_failed, + int *nr_retry_pages) +{ + struct folio *folio, *folio2, *dst, *dst2; + bool is_thp; + int nr_pages; + int rc; + + dst = list_first_entry(dst_folios, struct folio, lru); + dst2 = list_next_entry(dst, lru); + list_for_each_entry_safe(folio, folio2, src_folios, lru) { + is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); + nr_pages = folio_nr_pages(folio); + + cond_resched(); + + rc = migrate_folio_move(put_new_folio, private, + folio, dst, mode, + reason, ret_folios); + /* + * The rules are: + * 0: folio will be freed + * -EAGAIN: stay on the unmap_folios list + * Other errno: put on ret_folios list + */ + switch (rc) { + case -EAGAIN: + *retry += 1; + *thp_retry += is_thp; + *nr_retry_pages += nr_pages; + break; + case 0: + stats->nr_succeeded += nr_pages; + stats->nr_thp_succeeded += is_thp; + break; + default: + *nr_failed += 1; + stats->nr_thp_failed += is_thp; + stats->nr_failed_pages += nr_pages; + break; + } + dst = dst2; + dst2 = list_next_entry(dst, lru); + } +} + +static void migrate_folios_undo(struct list_head *src_folios, + struct list_head *dst_folios, + free_folio_t put_new_folio, unsigned long private, + struct list_head *ret_folios) +{ + struct folio *folio, *folio2, *dst, *dst2; + + dst = list_first_entry(dst_folios, struct folio, lru); + dst2 = list_next_entry(dst, lru); + list_for_each_entry_safe(folio, folio2, src_folios, lru) { + int old_page_state = 0; + struct anon_vma *anon_vma = NULL; + + __migrate_folio_extract(dst, &old_page_state, &anon_vma); + migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED, + anon_vma, true, ret_folios); + list_del(&dst->lru); + migrate_folio_undo_dst(dst, true, put_new_folio, private); + dst = dst2; + dst2 = list_next_entry(dst, lru); + } +} + +/* + * migrate_pages_batch() first unmaps folios in the from list as many as + * possible, then move the unmapped folios. + * + * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a + * lock or bit when we have locked more than one folio. Which may cause + * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the + * length of the from list must be <= 1. 
+ */ +static int migrate_pages_batch(struct list_head *from, + new_folio_t get_new_folio, free_folio_t put_new_folio, + unsigned long private, enum migrate_mode mode, int reason, + struct list_head *ret_folios, struct list_head *split_folios, + struct migrate_pages_stats *stats, int nr_pass) +{ + int retry = 1; + int thp_retry = 1; + int nr_failed = 0; + int nr_retry_pages = 0; + int pass = 0; + bool is_thp = false; + bool is_large = false; + struct folio *folio, *folio2, *dst = NULL; + int rc, rc_saved = 0, nr_pages; + LIST_HEAD(unmap_folios); + LIST_HEAD(dst_folios); + bool nosplit = (reason == MR_NUMA_MISPLACED); + + VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC && + !list_empty(from) && !list_is_singular(from)); + + for (pass = 0; pass < nr_pass && retry; pass++) { + retry = 0; + thp_retry = 0; + nr_retry_pages = 0; + + list_for_each_entry_safe(folio, folio2, from, lru) { + is_large = folio_test_large(folio); + is_thp = folio_test_pmd_mappable(folio); + nr_pages = folio_nr_pages(folio); + + cond_resched(); + + /* + * The rare folio on the deferred split list should + * be split now. It should not count as a failure: + * but increment nr_failed because, without doing so, + * migrate_pages() may report success with (split but + * unmigrated) pages still on its fromlist; whereas it + * always reports success when its fromlist is empty. + * stats->nr_thp_failed should be increased too, + * otherwise stats inconsistency will happen when + * migrate_pages_batch is called via migrate_pages() + * with MIGRATE_SYNC and MIGRATE_ASYNC. + * + * Only check it without removing it from the list. + * Since the folio can be on deferred_split_scan() + * local list and removing it can cause the local list + * corruption. Folio split process below can handle it + * with the help of folio_ref_freeze(). + * + * nr_pages > 2 is needed to avoid checking order-1 + * page cache folios. They exist, in contrast to + * non-existent order-1 anonymous folios, and do not + * use _deferred_list. + */ + if (nr_pages > 2 && + !list_empty(&folio->_deferred_list) && + folio_test_partially_mapped(folio)) { + if (!try_split_folio(folio, split_folios, mode)) { + nr_failed++; + stats->nr_thp_failed += is_thp; + stats->nr_thp_split += is_thp; + stats->nr_split++; + continue; + } + } + /* * Large folio migration might be unsupported or - * the allocation could've failed so we should retry + * the allocation might be failed so we should retry * on the same folio with the large folio split * to normal folios. * @@ -1498,78 +1859,92 @@ split_folio_migration: * we will migrate them after the rest of the * list is processed. */ - case -ENOSYS: - /* Large folio migration is unsupported */ - if (is_large) { - nr_large_failed++; - nr_thp_failed += is_thp; - if (!try_split_folio(folio, &split_folios)) { - nr_thp_split += is_thp; - break; - } - /* Hugetlb migration is unsupported */ - } else if (!no_split_folio_counting) { - nr_failed++; + if (!thp_migration_supported() && is_thp) { + nr_failed++; + stats->nr_thp_failed++; + if (!try_split_folio(folio, split_folios, mode)) { + stats->nr_thp_split++; + stats->nr_split++; + continue; } + stats->nr_failed_pages += nr_pages; + list_move_tail(&folio->lru, ret_folios); + continue; + } - nr_failed_pages += nr_pages; - list_move_tail(&folio->lru, &ret_folios); - break; + /* + * If we are holding the last folio reference, the folio + * was freed from under us, so just drop our reference. 
+ */ + if (likely(!page_has_movable_ops(&folio->page)) && + folio_ref_count(folio) == 1) { + folio_clear_active(folio); + folio_clear_unevictable(folio); + list_del(&folio->lru); + migrate_folio_done(folio, reason); + stats->nr_succeeded += nr_pages; + stats->nr_thp_succeeded += is_thp; + continue; + } + + rc = migrate_folio_unmap(get_new_folio, put_new_folio, + private, folio, &dst, mode, ret_folios); + /* + * The rules are: + * 0: folio will be put on unmap_folios list, + * dst folio put on dst_folios list + * -EAGAIN: stay on the from list + * -ENOMEM: stay on the from list + * Other errno: put on ret_folios list + */ + switch(rc) { case -ENOMEM: /* * When memory is low, don't bother to try to migrate - * other folios, just exit. + * other folios, move unmapped folios, then exit. */ - if (is_large) { - nr_large_failed++; - nr_thp_failed += is_thp; - /* Large folio NUMA faulting doesn't split to retry. */ - if (!nosplit) { - int ret = try_split_folio(folio, &split_folios); - - if (!ret) { - nr_thp_split += is_thp; - break; - } else if (reason == MR_LONGTERM_PIN && - ret == -EAGAIN) { - /* - * Try again to split large folio to - * mitigate the failure of longterm pinning. - */ - large_retry++; - thp_retry += is_thp; - nr_retry_pages += nr_pages; - break; - } + nr_failed++; + stats->nr_thp_failed += is_thp; + /* Large folio NUMA faulting doesn't split to retry. */ + if (is_large && !nosplit) { + int ret = try_split_folio(folio, split_folios, mode); + + if (!ret) { + stats->nr_thp_split += is_thp; + stats->nr_split++; + break; + } else if (reason == MR_LONGTERM_PIN && + ret == -EAGAIN) { + /* + * Try again to split large folio to + * mitigate the failure of longterm pinning. + */ + retry++; + thp_retry += is_thp; + nr_retry_pages += nr_pages; + /* Undo duplicated failure counting. */ + nr_failed--; + stats->nr_thp_failed -= is_thp; + break; } - } else if (!no_split_folio_counting) { - nr_failed++; } - nr_failed_pages += nr_pages + nr_retry_pages; - /* - * There might be some split folios of fail-to-migrate large - * folios left in split_folios list. Move them back to migration - * list so that they could be put back to the right list by - * the caller otherwise the folio refcnt will be leaked. - */ - list_splice_init(&split_folios, from); + stats->nr_failed_pages += nr_pages + nr_retry_pages; /* nr_failed isn't updated for not used */ - nr_large_failed += large_retry; - nr_thp_failed += thp_retry; - goto out; + stats->nr_thp_failed += thp_retry; + rc_saved = rc; + if (list_empty(&unmap_folios)) + goto out; + else + goto move; case -EAGAIN: - if (is_large) { - large_retry++; - thp_retry += is_thp; - } else if (!no_split_folio_counting) { - retry++; - } + retry++; + thp_retry += is_thp; nr_retry_pages += nr_pages; break; - case MIGRATEPAGE_SUCCESS: - nr_succeeded += nr_pages; - nr_thp_succeeded += is_thp; + case 0: + list_move_tail(&folio->lru, &unmap_folios); + list_add_tail(&dst->lru, &dst_folios); break; default: /* @@ -1578,40 +1953,189 @@ split_folio_migration: * removed from migration folio list and not * retried in the next outer loop. 
*/ - if (is_large) { - nr_large_failed++; - nr_thp_failed += is_thp; - } else if (!no_split_folio_counting) { - nr_failed++; - } - - nr_failed_pages += nr_pages; + nr_failed++; + stats->nr_thp_failed += is_thp; + stats->nr_failed_pages += nr_pages; break; } } } nr_failed += retry; - nr_large_failed += large_retry; - nr_thp_failed += thp_retry; - nr_failed_pages += nr_retry_pages; + stats->nr_thp_failed += thp_retry; + stats->nr_failed_pages += nr_retry_pages; +move: + /* Flush TLBs for all unmapped folios */ + try_to_unmap_flush(); + + retry = 1; + for (pass = 0; pass < nr_pass && retry; pass++) { + retry = 0; + thp_retry = 0; + nr_retry_pages = 0; + + /* Move the unmapped folios */ + migrate_folios_move(&unmap_folios, &dst_folios, + put_new_folio, private, mode, reason, + ret_folios, stats, &retry, &thp_retry, + &nr_failed, &nr_retry_pages); + } + nr_failed += retry; + stats->nr_thp_failed += thp_retry; + stats->nr_failed_pages += nr_retry_pages; + + rc = rc_saved ? : nr_failed; +out: + /* Cleanup remaining folios */ + migrate_folios_undo(&unmap_folios, &dst_folios, + put_new_folio, private, ret_folios); + + return rc; +} + +static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + enum migrate_mode mode, int reason, + struct list_head *ret_folios, struct list_head *split_folios, + struct migrate_pages_stats *stats) +{ + int rc, nr_failed = 0; + LIST_HEAD(folios); + struct migrate_pages_stats astats; + + memset(&astats, 0, sizeof(astats)); + /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */ + rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC, + reason, &folios, split_folios, &astats, + NR_MAX_MIGRATE_ASYNC_RETRY); + stats->nr_succeeded += astats.nr_succeeded; + stats->nr_thp_succeeded += astats.nr_thp_succeeded; + stats->nr_thp_split += astats.nr_thp_split; + stats->nr_split += astats.nr_split; + if (rc < 0) { + stats->nr_failed_pages += astats.nr_failed_pages; + stats->nr_thp_failed += astats.nr_thp_failed; + list_splice_tail(&folios, ret_folios); + return rc; + } + stats->nr_thp_failed += astats.nr_thp_split; /* - * Try to migrate split folios of fail-to-migrate large folios, no - * nr_failed counting in this round, since all split folios of a - * large folio is counted as 1 failure in the first round. + * Do not count rc, as pages will be retried below. + * Count nr_split only, since it includes nr_thp_split. */ + nr_failed += astats.nr_split; + /* + * Fall back to migrate all failed folios one by one synchronously. All + * failed folios except split THPs will be retried, so their failure + * isn't counted + */ + list_splice_tail_init(&folios, from); + while (!list_empty(from)) { + list_move(from->next, &folios); + rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, + private, mode, reason, ret_folios, + split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY); + list_splice_tail_init(&folios, ret_folios); + if (rc < 0) + return rc; + nr_failed += rc; + } + + return nr_failed; +} + +/* + * migrate_pages - migrate the folios specified in a list, to the free folios + * supplied as the target for the page migration + * + * @from: The list of folios to be migrated. + * @get_new_folio: The function used to allocate free folios to be used + * as the target of the folio migration. + * @put_new_folio: The function used to free target folios if migration + * fails, or NULL if no special handling is necessary. 
+ * @private: Private data to be passed on to get_new_folio() + * @mode: The migration mode that specifies the constraints for + * folio migration, if any. + * @reason: The reason for folio migration. + * @ret_succeeded: Set to the number of folios migrated successfully if + * the caller passes a non-NULL pointer. + * + * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios + * are movable any more because the list has become empty or no retryable folios + * exist any more. It is caller's responsibility to call putback_movable_pages() + * only if ret != 0. + * + * Returns the number of {normal folio, large folio, hugetlb} that were not + * migrated, or an error code. The number of large folio splits will be + * considered as the number of non-migrated large folio, no matter how many + * split folios of the large folio are migrated successfully. + */ +int migrate_pages(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + enum migrate_mode mode, int reason, unsigned int *ret_succeeded) +{ + int rc, rc_gather; + int nr_pages; + struct folio *folio, *folio2; + LIST_HEAD(folios); + LIST_HEAD(ret_folios); + LIST_HEAD(split_folios); + struct migrate_pages_stats stats; + + trace_mm_migrate_pages_start(mode, reason); + + memset(&stats, 0, sizeof(stats)); + + rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private, + mode, reason, &stats, &ret_folios); + if (rc_gather < 0) + goto out; + +again: + nr_pages = 0; + list_for_each_entry_safe(folio, folio2, from, lru) { + /* Retried hugetlb folios will be kept in list */ + if (folio_test_hugetlb(folio)) { + list_move_tail(&folio->lru, &ret_folios); + continue; + } + + nr_pages += folio_nr_pages(folio); + if (nr_pages >= NR_MAX_BATCHED_MIGRATION) + break; + } + if (nr_pages >= NR_MAX_BATCHED_MIGRATION) + list_cut_before(&folios, from, &folio2->lru); + else + list_splice_init(from, &folios); + if (mode == MIGRATE_ASYNC) + rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, + private, mode, reason, &ret_folios, + &split_folios, &stats, + NR_MAX_MIGRATE_PAGES_RETRY); + else + rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio, + private, mode, reason, &ret_folios, + &split_folios, &stats); + list_splice_tail_init(&folios, &ret_folios); + if (rc < 0) { + rc_gather = rc; + list_splice_tail(&split_folios, &ret_folios); + goto out; + } if (!list_empty(&split_folios)) { /* - * Move non-migrated folios (after 10 retries) to ret_folios - * to avoid migrating them again. + * Failure isn't counted since all split folios of a large folio + * is counted as 1 failure already. And, we only try to migrate + * with minimal effort, force MIGRATE_ASYNC mode and retry once. */ - list_splice_init(from, &ret_folios); - list_splice_init(&split_folios, from); - no_split_folio_counting = true; - retry = 1; - goto split_folio_migration; + migrate_pages_batch(&split_folios, get_new_folio, + put_new_folio, private, MIGRATE_ASYNC, reason, + &ret_folios, NULL, &stats, 1); + list_splice_tail_init(&split_folios, &ret_folios); } - - rc = nr_failed + nr_large_failed; + rc_gather += rc; + if (!list_empty(from)) + goto again; out: /* * Put the permanent failure folio back to migration list, they @@ -1624,61 +2148,61 @@ out: * are migrated successfully. 
*/ if (list_empty(from)) - rc = 0; - - count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); - count_vm_events(PGMIGRATE_FAIL, nr_failed_pages); - count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded); - count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed); - count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split); - trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded, - nr_thp_failed, nr_thp_split, mode, reason); + rc_gather = 0; + + count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded); + count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages); + count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded); + count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed); + count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split); + trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages, + stats.nr_thp_succeeded, stats.nr_thp_failed, + stats.nr_thp_split, stats.nr_split, mode, + reason); if (ret_succeeded) - *ret_succeeded = nr_succeeded; + *ret_succeeded = stats.nr_succeeded; - return rc; + return rc_gather; } -struct page *alloc_migration_target(struct page *page, unsigned long private) +struct folio *alloc_migration_target(struct folio *src, unsigned long private) { - struct folio *folio = page_folio(page); struct migration_target_control *mtc; gfp_t gfp_mask; unsigned int order = 0; - struct folio *new_folio = NULL; int nid; - int zidx; + enum zone_type zidx; mtc = (struct migration_target_control *)private; gfp_mask = mtc->gfp_mask; nid = mtc->nid; if (nid == NUMA_NO_NODE) - nid = folio_nid(folio); + nid = folio_nid(src); - if (folio_test_hugetlb(folio)) { - struct hstate *h = folio_hstate(folio); + if (folio_test_hugetlb(src)) { + struct hstate *h = folio_hstate(src); gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); - return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); + return alloc_hugetlb_folio_nodemask(h, nid, + mtc->nmask, gfp_mask, + htlb_allow_alloc_fallback(mtc->reason)); } - if (folio_test_large(folio)) { + if (folio_test_large(src)) { /* * clear __GFP_RECLAIM to make the migration callback * consistent with regular THP allocations. 
*/ gfp_mask &= ~__GFP_RECLAIM; gfp_mask |= GFP_TRANSHUGE; - order = folio_order(folio); + order = folio_order(src); } - zidx = zone_idx(folio_zone(folio)); + zidx = folio_zonenum(src); if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) gfp_mask |= __GFP_HIGHMEM; - new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask); - - return &new_folio->page; + return __folio_alloc(gfp_mask, order, nid, mtc->nmask); } #ifdef CONFIG_NUMA @@ -1694,13 +2218,13 @@ static int store_status(int __user *status, int start, int value, int nr) return 0; } -static int do_move_pages_to_node(struct mm_struct *mm, - struct list_head *pagelist, int node) +static int do_move_pages_to_node(struct list_head *pagelist, int node) { int err; struct migration_target_control mtc = { .nid = node, .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, + .reason = MR_SYSCALL, }; err = migrate_pages(pagelist, alloc_migration_target, NULL, @@ -1710,83 +2234,71 @@ static int do_move_pages_to_node(struct mm_struct *mm, return err; } +static int __add_folio_for_migration(struct folio *folio, int node, + struct list_head *pagelist, bool migrate_all) +{ + if (is_zero_folio(folio) || is_huge_zero_folio(folio)) + return -EFAULT; + + if (folio_is_zone_device(folio)) + return -ENOENT; + + if (folio_nid(folio) == node) + return 0; + + if (folio_maybe_mapped_shared(folio) && !migrate_all) + return -EACCES; + + if (folio_test_hugetlb(folio)) { + if (folio_isolate_hugetlb(folio, pagelist)) + return 1; + } else if (folio_isolate_lru(folio)) { + list_add_tail(&folio->lru, pagelist); + node_stat_mod_folio(folio, + NR_ISOLATED_ANON + folio_is_file_lru(folio), + folio_nr_pages(folio)); + return 1; + } + return -EBUSY; +} + /* - * Resolves the given address to a struct page, isolates it from the LRU and + * Resolves the given address to a struct folio, isolates it from the LRU and * puts it to the given pagelist. 
* Returns: - * errno - if the page cannot be found/isolated + * errno - if the folio cannot be found/isolated * 0 - when it doesn't have to be migrated because it is already on the * target node * 1 - when it has been queued */ -static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, +static int add_folio_for_migration(struct mm_struct *mm, const void __user *p, int node, struct list_head *pagelist, bool migrate_all) { struct vm_area_struct *vma; - struct page *page; - int err; + struct folio_walk fw; + struct folio *folio; + unsigned long addr; + int err = -EFAULT; mmap_read_lock(mm); - err = -EFAULT; - vma = vma_lookup(mm, addr); - if (!vma || !vma_migratable(vma)) - goto out; + addr = (unsigned long)untagged_addr_remote(mm, p); - /* FOLL_DUMP to ignore special (like zero) pages */ - page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); - - err = PTR_ERR(page); - if (IS_ERR(page)) - goto out; - - err = -ENOENT; - if (!page) - goto out; - - if (is_zone_device_page(page)) - goto out_putpage; - - err = 0; - if (page_to_nid(page) == node) - goto out_putpage; - - err = -EACCES; - if (page_mapcount(page) > 1 && !migrate_all) - goto out_putpage; - - if (PageHuge(page)) { - if (PageHead(page)) { - err = isolate_hugetlb(page, pagelist); - if (!err) - err = 1; + vma = vma_lookup(mm, addr); + if (vma && vma_migratable(vma)) { + folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE); + if (folio) { + err = __add_folio_for_migration(folio, node, pagelist, + migrate_all); + folio_walk_end(&fw, vma); + } else { + err = -ENOENT; } - } else { - struct page *head; - - head = compound_head(page); - err = isolate_lru_page(head); - if (err) - goto out_putpage; - - err = 1; - list_add_tail(&head->lru, pagelist); - mod_node_page_state(page_pgdat(head), - NR_ISOLATED_ANON + page_is_file_lru(head), - thp_nr_pages(head)); } -out_putpage: - /* - * Either remove the duplicate refcount from - * isolate_lru_page() or drop the page ref if it was - * not isolated. 
- */ - put_page(page); -out: mmap_read_unlock(mm); return err; } -static int move_pages_and_store_status(struct mm_struct *mm, int node, +static int move_pages_and_store_status(int node, struct list_head *pagelist, int __user *status, int start, int i, unsigned long nr_pages) { @@ -1795,7 +2307,7 @@ static int move_pages_and_store_status(struct mm_struct *mm, int node, if (list_empty(pagelist)) return 0; - err = do_move_pages_to_node(mm, pagelist, node); + err = do_move_pages_to_node(pagelist, node); if (err) { /* * Positive err means the number of failed @@ -1822,6 +2334,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, const int __user *nodes, int __user *status, int flags) { + compat_uptr_t __user *compat_pages = (void __user *)pages; int current_node = NUMA_NO_NODE; LIST_HEAD(pagelist); int start, i; @@ -1831,15 +2344,22 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, for (i = start = 0; i < nr_pages; i++) { const void __user *p; - unsigned long addr; int node; err = -EFAULT; - if (get_user(p, pages + i)) - goto out_flush; + if (in_compat_syscall()) { + compat_uptr_t cp; + + if (get_user(cp, compat_pages + i)) + goto out_flush; + + p = compat_ptr(cp); + } else { + if (get_user(p, pages + i)) + goto out_flush; + } if (get_user(node, nodes + i)) goto out_flush; - addr = (unsigned long)untagged_addr(p); err = -ENODEV; if (node < 0 || node >= MAX_NUMNODES) @@ -1855,7 +2375,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, current_node = node; start = i; } else if (node != current_node) { - err = move_pages_and_store_status(mm, current_node, + err = move_pages_and_store_status(current_node, &pagelist, status, start, i, nr_pages); if (err) goto out; @@ -1867,8 +2387,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, * Errors in the page lookup or isolation are not fatal and we simply * report them via status */ - err = add_page_for_migration(mm, addr, current_node, - &pagelist, flags & MPOL_MF_MOVE_ALL); + err = add_folio_for_migration(mm, p, current_node, &pagelist, + flags & MPOL_MF_MOVE_ALL); if (err > 0) { /* The page is successfully queued for migration */ @@ -1876,13 +2396,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, } /* - * The move_pages() man page does not have an -EEXIST choice, so - * use -EFAULT instead. - */ - if (err == -EEXIST) - err = -EFAULT; - - /* * If the page is already on the target node (!err), store the * node, otherwise, store the err. 
*/ @@ -1890,7 +2403,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, if (err) goto out_flush; - err = move_pages_and_store_status(mm, current_node, &pagelist, + err = move_pages_and_store_status(current_node, &pagelist, status, start, i, nr_pages); if (err) { /* We have accounted for page i */ @@ -1902,7 +2415,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, } out_flush: /* Make sure we do not overwrite the existing error */ - err1 = move_pages_and_store_status(mm, current_node, &pagelist, + err1 = move_pages_and_store_status(current_node, &pagelist, status, start, i, nr_pages); if (err >= 0) err = err1; @@ -1924,28 +2437,26 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, for (i = 0; i < nr_pages; i++) { unsigned long addr = (unsigned long)(*pages); struct vm_area_struct *vma; - struct page *page; + struct folio_walk fw; + struct folio *folio; int err = -EFAULT; vma = vma_lookup(mm, addr); if (!vma) goto set_status; - /* FOLL_DUMP to ignore special (like zero) pages */ - page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); - - err = PTR_ERR(page); - if (IS_ERR(page)) - goto set_status; - - err = -ENOENT; - if (!page) - goto set_status; - - if (!is_zone_device_page(page)) - err = page_to_nid(page); - - put_page(page); + folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE); + if (folio) { + if (is_zero_folio(folio) || is_huge_zero_folio(folio)) + err = -EFAULT; + else if (folio_is_zone_device(folio)) + err = -ENOENT; + else + err = folio_nid(folio); + folio_walk_end(&fw, vma); + } else { + err = -ENOENT; + } set_status: *status = err; @@ -1958,6 +2469,7 @@ set_status: static int get_compat_pages_array(const void __user *chunk_pages[], const void __user * __user *pages, + unsigned long chunk_offset, unsigned long chunk_nr) { compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages; @@ -1965,7 +2477,7 @@ static int get_compat_pages_array(const void __user *chunk_pages[], int i; for (i = 0; i < chunk_nr; i++) { - if (get_user(p, pages32 + i)) + if (get_user(p, pages32 + chunk_offset + i)) return -EFAULT; chunk_pages[i] = compat_ptr(p); } @@ -1984,27 +2496,28 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, #define DO_PAGES_STAT_CHUNK_NR 16UL const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; int chunk_status[DO_PAGES_STAT_CHUNK_NR]; + unsigned long chunk_offset = 0; while (nr_pages) { unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR); if (in_compat_syscall()) { if (get_compat_pages_array(chunk_pages, pages, - chunk_nr)) + chunk_offset, chunk_nr)) break; } else { - if (copy_from_user(chunk_pages, pages, + if (copy_from_user(chunk_pages, pages + chunk_offset, chunk_nr * sizeof(*chunk_pages))) break; } do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); - if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) + if (copy_to_user(status + chunk_offset, chunk_status, + chunk_nr * sizeof(*status))) break; - pages += chunk_nr; - status += chunk_nr; + chunk_offset += chunk_nr; nr_pages -= chunk_nr; } return nr_pages ? -EFAULT : 0; @@ -2025,25 +2538,19 @@ static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes) return current->mm; } - /* Find the mm_struct */ - rcu_read_lock(); - task = find_task_by_vpid(pid); + task = find_get_task_by_vpid(pid); if (!task) { - rcu_read_unlock(); return ERR_PTR(-ESRCH); } - get_task_struct(task); /* * Check if this process has the right to modify the specified * process. 
Use the regular "ptrace_may_access()" checks. */ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { - rcu_read_unlock(); mm = ERR_PTR(-EPERM); goto out; } - rcu_read_unlock(); mm = ERR_PTR(security_task_movememory(task)); if (IS_ERR(mm)) @@ -2119,20 +2626,19 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat, if (!zone_watermark_ok(zone, 0, high_wmark_pages(zone) + nr_migrate_pages, - ZONE_MOVABLE, 0)) + ZONE_MOVABLE, ALLOC_CMA)) continue; return true; } return false; } -static struct page *alloc_misplaced_dst_page(struct page *page, +static struct folio *alloc_misplaced_dst_folio(struct folio *src, unsigned long data) { int nid = (int) data; - int order = compound_order(page); + int order = folio_order(src); gfp_t gfp = __GFP_THISNODE; - struct folio *new; if (order > 0) gfp |= GFP_TRANSHUGE_LIGHT; @@ -2141,110 +2647,104 @@ static struct page *alloc_misplaced_dst_page(struct page *page, __GFP_NOWARN; gfp &= ~__GFP_RECLAIM; } - new = __folio_alloc_node(gfp, order, nid); - - return &new->page; + return __folio_alloc_node(gfp, order, nid); } -static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) +/* + * Prepare for calling migrate_misplaced_folio() by isolating the folio if + * permitted. Must be called with the PTL still held. + */ +int migrate_misplaced_folio_prepare(struct folio *folio, + struct vm_area_struct *vma, int node) { - int nr_pages = thp_nr_pages(page); - int order = compound_order(page); + int nr_pages = folio_nr_pages(folio); + pg_data_t *pgdat = NODE_DATA(node); - VM_BUG_ON_PAGE(order && !PageTransHuge(page), page); + if (folio_is_file_lru(folio)) { + /* + * Do not migrate file folios that are mapped in multiple + * processes with execute permissions as they are probably + * shared libraries. + * + * See folio_maybe_mapped_shared() on possible imprecision + * when we cannot easily detect if a folio is shared. + */ + if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio)) + return -EACCES; - /* Do not migrate THP mapped by multiple processes */ - if (PageTransHuge(page) && total_mapcount(page) > 1) - return 0; + /* + * Do not migrate dirty folios as not all filesystems can move + * dirty folios in MIGRATE_ASYNC mode which is a waste of + * cycles. + */ + if (folio_test_dirty(folio)) + return -EAGAIN; + } /* Avoid migrating to a node that is nearly full */ if (!migrate_balanced_pgdat(pgdat, nr_pages)) { int z; if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)) - return 0; + return -EAGAIN; for (z = pgdat->nr_zones - 1; z >= 0; z--) { if (managed_zone(pgdat->node_zones + z)) break; } - wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); - return 0; + + /* + * If there are no managed zones, it should not proceed + * further. + */ + if (z < 0) + return -EAGAIN; + + wakeup_kswapd(pgdat->node_zones + z, 0, + folio_order(folio), ZONE_MOVABLE); + return -EAGAIN; } - if (isolate_lru_page(page)) - return 0; + if (!folio_isolate_lru(folio)) + return -EAGAIN; - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), + node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), nr_pages); - - /* - * Isolating the page has taken another reference, so the - * caller's reference can be safely dropped without the page - * disappearing underneath us during migration. - */ - put_page(page); - return 1; + return 0; } /* - * Attempt to migrate a misplaced page to the specified destination - * node. 
Caller is expected to have an elevated reference count on - * the page that will be dropped by this function before returning. + * Attempt to migrate a misplaced folio to the specified destination + * node. Caller is expected to have isolated the folio by calling + * migrate_misplaced_folio_prepare(), which will result in an + * elevated reference count on the folio. This function will un-isolate the + * folio, dereferencing the folio before returning. */ -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, - int node) +int migrate_misplaced_folio(struct folio *folio, int node) { pg_data_t *pgdat = NODE_DATA(node); - int isolated; int nr_remaining; unsigned int nr_succeeded; LIST_HEAD(migratepages); - int nr_pages = thp_nr_pages(page); + struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio); + struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); - /* - * Don't migrate file pages that are mapped in multiple processes - * with execute permissions as they are probably shared libraries. - */ - if (page_mapcount(page) != 1 && page_is_file_lru(page) && - (vma->vm_flags & VM_EXEC)) - goto out; - - /* - * Also do not migrate dirty pages as not all filesystems can move - * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. - */ - if (page_is_file_lru(page) && PageDirty(page)) - goto out; - - isolated = numamigrate_isolate_page(pgdat, page); - if (!isolated) - goto out; - - list_add(&page->lru, &migratepages); - nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, + list_add(&folio->lru, &migratepages); + nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio, NULL, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED, &nr_succeeded); - if (nr_remaining) { - if (!list_empty(&migratepages)) { - list_del(&page->lru); - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -nr_pages); - putback_lru_page(page); - } - isolated = 0; - } + if (nr_remaining && !list_empty(&migratepages)) + putback_movable_pages(&migratepages); if (nr_succeeded) { count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded); - if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node)) - mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, - nr_succeeded); + count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded); + if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) + && !node_is_toptier(folio_nid(folio)) + && node_is_toptier(node)) + mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded); } + mem_cgroup_put(memcg); BUG_ON(!list_empty(&migratepages)); - return isolated; - -out: - put_page(page); - return 0; + return nr_remaining ? -EAGAIN : 0; } #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_NUMA */ |
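The add_folio_for_migration()/do_pages_move() rework above is the kernel half of the move_pages(2) syscall. For reference (not part of the patch), a minimal userspace caller, assuming libnuma's <numaif.h> and an arbitrary destination node 0, could look like:

/* Sketch: ask the kernel to migrate one anonymous page to node 0.
 * Build with: cc move_demo.c -lnuma  (assumes libnuma's <numaif.h>). */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf;
	void *pages[1];
	int nodes[1] = { 0 };	/* assumed destination node */
	int status[1] = { -1 };
	long ret;

	if (posix_memalign(&buf, page_size, page_size))
		return 1;
	memset(buf, 0, page_size);	/* fault the page in first */
	pages[0] = buf;

	/* pid == 0 means "the calling process". */
	ret = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
	if (ret < 0)
		perror("move_pages");
	printf("move_pages() = %ld, status[0] = %d\n", ret, status[0]);

	free(buf);
	return 0;
}

move_pages() returns 0 on success, a positive count of pages that could not be migrated, or -1 with errno set; the per-page result (destination node or a negative errno) lands in status[].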
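do_pages_stat() serves the status-only mode of the same syscall (nodes == NULL), walking the user arrays DO_PAGES_STAT_CHUNK_NR entries at a time; the chunk_offset change above keeps the native and compat paths indexing the same positions. A status-only sketch (again assuming <numaif.h>; the page count is arbitrary, chosen above 16 so the kernel walks several chunks) might be:

/* Sketch: query the NUMA node of many pages in one move_pages(2) call
 * by passing nodes == NULL. Build with: cc query_demo.c -lnuma. */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define NR_PAGES 64

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	char *buf;
	void *pages[NR_PAGES];
	int status[NR_PAGES];
	int i;

	if (posix_memalign((void **)&buf, page_size, NR_PAGES * page_size))
		return 1;
	memset(buf, 0, NR_PAGES * page_size);	/* fault everything in */

	for (i = 0; i < NR_PAGES; i++)
		pages[i] = buf + i * page_size;

	/* nodes == NULL: only report the node of each page in status[]. */
	if (move_pages(0, NR_PAGES, pages, NULL, status, 0) < 0) {
		perror("move_pages");
		return 1;
	}

	for (i = 0; i < NR_PAGES; i++)
		printf("page %2d: node %d\n", i, status[i]);

	free(buf);
	return 0;
}

On success each status[] entry holds the node of the corresponding page, or a negative errno for pages the kernel could not resolve.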
