Diffstat (limited to 'mm/migrate.c')
-rw-r--r--    mm/migrate.c    1432
1 file changed, 800 insertions, 632 deletions
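For orientation before the raw diff: the patch replaces the old __PageMovable()/folio_movable_ops() lookup with a static registration API, set_movable_ops(), restricted to PGTY_offline and PGTY_zsmalloc pages. Below is a minimal, hypothetical sketch of how a balloon-style driver owning PGTY_offline pages could plug into that API; the balloon_* identifiers, the stub callback bodies, and the header locations are assumptions made for illustration and are not part of the patch.

/*
 * Hypothetical sketch only: a GPL module registering movable_ops for
 * PGTY_offline pages against the API introduced by this patch. The
 * balloon_* names and stub bodies are invented for illustration.
 */
#include <linux/module.h>
#include <linux/migrate.h>
#include <linux/page-flags.h>

static bool balloon_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Detach the page from the driver's own tracking so it cannot be
	 * reused or freed while the migration core owns it. */
	return true;
}

static int balloon_migrate_page(struct page *dst, struct page *src,
				enum migrate_mode mode)
{
	/* Transfer driver state from src to dst; returning 0 tells the
	 * migration core that ownership of the two pages was swapped. */
	return 0;
}

static void balloon_putback_page(struct page *page)
{
	/* Re-insert the page into the driver's tracking after a failed or
	 * aborted migration. */
}

static const struct movable_operations balloon_mops = {
	.isolate_page	= balloon_isolate_page,
	.migrate_page	= balloon_migrate_page,
	.putback_page	= balloon_putback_page,
};

static int __init balloon_example_init(void)
{
	/* Only one provider per page type; a second registration gets -EBUSY. */
	return set_movable_ops(&balloon_mops, PGTY_offline);
}

static void __exit balloon_example_exit(void)
{
	/* Passing NULL clears the registration for this page type. */
	set_movable_ops(NULL, PGTY_offline);
}

module_init(balloon_example_init);
module_exit(balloon_example_exit);
MODULE_LICENSE("GPL");

Pages handed to the migration core this way must already carry the PGTY_offline page type; as the patch's comments note, that type must stay sticky until the page is freed back to the buddy allocator.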
diff --git a/mm/migrate.c b/mm/migrate.c index 78c9bd505b62..5169f9717f60 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -16,11 +16,10 @@ #include <linux/migrate.h> #include <linux/export.h> #include <linux/swap.h> -#include <linux/swapops.h> +#include <linux/leafops.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/mm_inline.h> -#include <linux/nsproxy.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/topology.h> @@ -35,30 +34,88 @@ #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/hugetlb.h> -#include <linux/hugetlb_cgroup.h> #include <linux/gfp.h> -#include <linux/pfn_t.h> -#include <linux/memremap.h> -#include <linux/userfaultfd_k.h> -#include <linux/balloon_compaction.h> #include <linux/page_idle.h> #include <linux/page_owner.h> #include <linux/sched/mm.h> #include <linux/ptrace.h> -#include <linux/oom.h> #include <linux/memory.h> -#include <linux/random.h> #include <linux/sched/sysctl.h> #include <linux/memory-tiers.h> +#include <linux/pagewalk.h> #include <asm/tlbflush.h> #include <trace/events/migrate.h> #include "internal.h" +#include "swap.h" -bool isolate_movable_page(struct page *page, isolate_mode_t mode) +static const struct movable_operations *offline_movable_ops; +static const struct movable_operations *zsmalloc_movable_ops; + +int set_movable_ops(const struct movable_operations *ops, enum pagetype type) +{ + /* + * We only allow for selected types and don't handle concurrent + * registration attempts yet. + */ + switch (type) { + case PGTY_offline: + if (offline_movable_ops && ops) + return -EBUSY; + offline_movable_ops = ops; + break; + case PGTY_zsmalloc: + if (zsmalloc_movable_ops && ops) + return -EBUSY; + zsmalloc_movable_ops = ops; + break; + default: + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(set_movable_ops); + +static const struct movable_operations *page_movable_ops(struct page *page) +{ + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page); + + /* + * If we enable page migration for a page of a certain type by marking + * it as movable, the page type must be sticky until the page gets freed + * back to the buddy. + */ + if (PageOffline(page)) + /* Only balloon compaction sets PageOffline pages movable. */ + return offline_movable_ops; + if (PageZsmalloc(page)) + return zsmalloc_movable_ops; + + return NULL; +} + +/** + * isolate_movable_ops_page - isolate a movable_ops page for migration + * @page: The page. + * @mode: The isolation mode. + * + * Try to isolate a movable_ops page for migration. Will fail if the page is + * not a movable_ops page, if the page is already isolated for migration + * or if the page was just was released by its owner. + * + * Once isolated, the page cannot get freed until it is either putback + * or migrated. + * + * Returns true if isolation succeeded, otherwise false. + */ +bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode) { + /* + * TODO: these pages will not be folios in the future. All + * folio dependencies will have to be removed. + */ struct folio *folio = folio_get_nontail_page(page); const struct movable_operations *mops; @@ -74,20 +131,15 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode) if (!folio) goto out; - if (unlikely(folio_test_slab(folio))) - goto out_putfolio; - /* Pairs with smp_wmb() in slab freeing, e.g. 
SLUB's __free_slab() */ - smp_rmb(); /* - * Check movable flag before taking the page lock because + * Check for movable_ops pages before taking the page lock because * we use non-atomic bitops on newly allocated page flags so * unconditionally grabbing the lock ruins page's owner side. + * + * Note that once a page has movable_ops, it will stay that way + * until the page was freed. */ - if (unlikely(!__folio_test_movable(folio))) - goto out_putfolio; - /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */ - smp_rmb(); - if (unlikely(folio_test_slab(folio))) + if (unlikely(!page_has_movable_ops(page))) goto out_putfolio; /* @@ -104,18 +156,20 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode) if (unlikely(!folio_trylock(folio))) goto out_putfolio; - if (!folio_test_movable(folio) || folio_test_isolated(folio)) + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page); + if (PageMovableOpsIsolated(page)) goto out_no_isolated; - mops = folio_movable_ops(folio); - VM_BUG_ON_FOLIO(!mops, folio); + mops = page_movable_ops(page); + if (WARN_ON_ONCE(!mops)) + goto out_no_isolated; - if (!mops->isolate_page(&folio->page, mode)) + if (!mops->isolate_page(page, mode)) goto out_no_isolated; - /* Driver shouldn't use PG_isolated bit of page->flags */ - WARN_ON_ONCE(folio_test_isolated(folio)); - folio_set_isolated(folio); + /* Driver shouldn't use the isolated flag */ + VM_WARN_ON_ONCE_PAGE(PageMovableOpsIsolated(page), page); + SetPageMovableOpsIsolated(page); folio_unlock(folio); return true; @@ -128,12 +182,68 @@ out: return false; } -static void putback_movable_folio(struct folio *folio) +/** + * putback_movable_ops_page - putback an isolated movable_ops page + * @page: The isolated page. + * + * Putback an isolated movable_ops page. + * + * After the page was putback, it might get freed instantly. + */ +static void putback_movable_ops_page(struct page *page) { - const struct movable_operations *mops = folio_movable_ops(folio); + /* + * TODO: these pages will not be folios in the future. All + * folio dependencies will have to be removed. + */ + struct folio *folio = page_folio(page); - mops->putback_page(&folio->page); - folio_clear_isolated(folio); + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page); + VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(page), page); + folio_lock(folio); + page_movable_ops(page)->putback_page(page); + ClearPageMovableOpsIsolated(page); + folio_unlock(folio); + folio_put(folio); +} + +/** + * migrate_movable_ops_page - migrate an isolated movable_ops page + * @dst: The destination page. + * @src: The source page. + * @mode: The migration mode. + * + * Migrate an isolated movable_ops page. + * + * If the src page was already released by its owner, the src page is + * un-isolated (putback) and migration succeeds; the migration core will be the + * owner of both pages. + * + * If the src page was not released by its owner and the migration was + * successful, the owner of the src page and the dst page are swapped and + * the src page is un-isolated. + * + * If migration fails, the ownership stays unmodified and the src page + * remains isolated: migration may be retried later or the page can be putback. + * + * TODO: migration core will treat both pages as folios and lock them before + * this call to unlock them after this call. Further, the folio refcounts on + * src and dst are also released by migration core. These pages will not be + * folios in the future, so that must be reworked. 
+ * + * Returns 0 on success, otherwise a negative error code. + */ +static int migrate_movable_ops_page(struct page *dst, struct page *src, + enum migrate_mode mode) +{ + int rc; + + VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src); + VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src); + rc = page_movable_ops(src)->migrate_page(dst, src, mode); + if (!rc) + ClearPageMovableOpsIsolated(src); + return rc; } /* @@ -142,7 +252,7 @@ static void putback_movable_folio(struct folio *folio) * * This function shall be used whenever the isolated pageset has been * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() - * and isolate_hugetlb(). + * and folio_isolate_hugetlb(). */ void putback_movable_pages(struct list_head *l) { @@ -151,24 +261,12 @@ void putback_movable_pages(struct list_head *l) list_for_each_entry_safe(folio, folio2, l, lru) { if (unlikely(folio_test_hugetlb(folio))) { - folio_putback_active_hugetlb(folio); + folio_putback_hugetlb(folio); continue; } list_del(&folio->lru); - /* - * We isolated non-lru movable folio so here we can use - * __PageMovable because LRU folio's mapping cannot have - * PAGE_MAPPING_MOVABLE. - */ - if (unlikely(__folio_test_movable(folio))) { - VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); - folio_lock(folio); - if (folio_test_movable(folio)) - putback_movable_folio(folio); - else - folio_clear_isolated(folio); - folio_unlock(folio); - folio_put(folio); + if (unlikely(page_has_movable_ops(&folio->page))) { + putback_movable_ops_page(&folio->page); } else { node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), -folio_nr_pages(folio)); @@ -177,19 +275,85 @@ void putback_movable_pages(struct list_head *l) } } +/* Must be called with an elevated refcount on the non-hugetlb folio */ +bool isolate_folio_to_list(struct folio *folio, struct list_head *list) +{ + if (folio_test_hugetlb(folio)) + return folio_isolate_hugetlb(folio, list); + + if (page_has_movable_ops(&folio->page)) { + if (!isolate_movable_ops_page(&folio->page, + ISOLATE_UNEVICTABLE)) + return false; + } else { + if (!folio_isolate_lru(folio)) + return false; + node_stat_add_folio(folio, NR_ISOLATED_ANON + + folio_is_file_lru(folio)); + } + list_add(&folio->lru, list); + return true; +} + +static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, + struct folio *folio, pte_t old_pte, unsigned long idx) +{ + struct page *page = folio_page(folio, idx); + pte_t newpte; + + if (PageCompound(page) || PageHWPoison(page)) + return false; + + VM_BUG_ON_PAGE(!PageAnon(page), page); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(pte_present(old_pte), page); + VM_WARN_ON_ONCE_FOLIO(folio_is_device_private(folio), folio); + + if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || + mm_forbids_zeropage(pvmw->vma->vm_mm)) + return false; + + /* + * The pmd entry mapping the old thp was flushed and the pte mapping + * this subpage has been non present. If the subpage is only zero-filled + * then map it to the shared zeropage. 
+ */ + if (!pages_identical(page, ZERO_PAGE(0))) + return false; + + newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address), + pvmw->vma->vm_page_prot)); + + if (pte_swp_soft_dirty(old_pte)) + newpte = pte_mksoft_dirty(newpte); + if (pte_swp_uffd_wp(old_pte)) + newpte = pte_mkuffd_wp(newpte); + + set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte); + + dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio)); + return true; +} + +struct rmap_walk_arg { + struct folio *folio; + bool map_unused_to_zeropage; +}; + /* * Restore a potential migration pte to a working pte entry */ static bool remove_migration_pte(struct folio *folio, - struct vm_area_struct *vma, unsigned long addr, void *old) + struct vm_area_struct *vma, unsigned long addr, void *arg) { - DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); + struct rmap_walk_arg *rmap_walk_arg = arg; + DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION); while (page_vma_mapped_walk(&pvmw)) { rmap_t rmap_flags = RMAP_NONE; pte_t old_pte; pte_t pte; - swp_entry_t entry; + softleaf_t entry; struct page *new; unsigned long idx = 0; @@ -207,24 +371,30 @@ static bool remove_migration_pte(struct folio *folio, continue; } #endif + old_pte = ptep_get(pvmw.pte); + if (rmap_walk_arg->map_unused_to_zeropage && + try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx)) + continue; folio_get(folio); pte = mk_pte(new, READ_ONCE(vma->vm_page_prot)); - old_pte = ptep_get(pvmw.pte); - if (pte_swp_soft_dirty(old_pte)) - pte = pte_mksoft_dirty(pte); - entry = pte_to_swp_entry(old_pte); - if (!is_migration_entry_young(entry)) + entry = softleaf_from_pte(old_pte); + if (!softleaf_is_migration_young(entry)) pte = pte_mkold(pte); - if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) + if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry)) pte = pte_mkdirty(pte); - if (is_writable_migration_entry(entry)) - pte = pte_mkwrite(pte); + if (pte_swp_soft_dirty(old_pte)) + pte = pte_mksoft_dirty(pte); + else + pte = pte_clear_soft_dirty(pte); + + if (softleaf_is_migration_write(entry)) + pte = pte_mkwrite(pte, vma); else if (pte_swp_uffd_wp(old_pte)) pte = pte_mkuffd_wp(pte); - if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) + if (folio_test_anon(folio) && !softleaf_is_migration_read(entry)) rmap_flags |= RMAP_EXCLUSIVE; if (unlikely(is_device_private_page(new))) { @@ -234,7 +404,7 @@ static bool remove_migration_pte(struct folio *folio, else entry = make_readable_device_private_entry( page_to_pfn(new)); - pte = swp_entry_to_pte(entry); + pte = softleaf_to_pte(entry); if (pte_swp_soft_dirty(old_pte)) pte = pte_swp_mksoft_dirty(pte); if (pte_swp_uffd_wp(old_pte)) @@ -243,26 +413,29 @@ static bool remove_migration_pte(struct folio *folio, #ifdef CONFIG_HUGETLB_PAGE if (folio_test_hugetlb(folio)) { - unsigned int shift = huge_page_shift(hstate_vma(vma)); + struct hstate *h = hstate_vma(vma); + unsigned int shift = huge_page_shift(h); + unsigned long psize = huge_page_size(h); pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (folio_test_anon(folio)) - hugepage_add_anon_rmap(new, vma, pvmw.address, - rmap_flags); + hugetlb_add_anon_rmap(folio, vma, pvmw.address, + rmap_flags); else - page_dup_file_rmap(new, true); - set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); + hugetlb_add_file_rmap(folio); + set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte, + psize); } else #endif { if (folio_test_anon(folio)) - page_add_anon_rmap(new, vma, pvmw.address, - 
rmap_flags); + folio_add_anon_rmap_pte(folio, new, vma, + pvmw.address, rmap_flags); else - page_add_file_rmap(new, vma, false); + folio_add_file_rmap_pte(folio, new, vma); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } - if (vma->vm_flags & VM_LOCKED) + if (READ_ONCE(vma->vm_flags) & VM_LOCKED) mlock_drain_local(); trace_remove_migration_pte(pvmw.address, pte_val(pte), @@ -279,14 +452,21 @@ static bool remove_migration_pte(struct folio *folio, * Get rid of all migration entries and replace them by * references to the indicated page. */ -void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) +void remove_migration_ptes(struct folio *src, struct folio *dst, int flags) { + struct rmap_walk_arg rmap_walk_arg = { + .folio = src, + .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE, + }; + struct rmap_walk_control rwc = { .rmap_one = remove_migration_pte, - .arg = src, + .arg = &rmap_walk_arg, }; - if (locked) + VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src); + + if (flags & RMP_LOCKED) rmap_walk_locked(dst, &rwc); else rmap_walk(dst, &rwc); @@ -303,7 +483,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, spinlock_t *ptl; pte_t *ptep; pte_t pte; - swp_entry_t entry; + softleaf_t entry; ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) @@ -312,11 +492,11 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, pte = ptep_get(ptep); pte_unmap(ptep); - if (!is_swap_pte(pte)) + if (pte_none(pte) || pte_present(pte)) goto out; - entry = pte_to_swp_entry(pte); - if (!is_migration_entry(entry)) + entry = softleaf_from_pte(pte); + if (!softleaf_is_migration(entry)) goto out; migration_entry_wait_on_locked(entry, ptl); @@ -332,19 +512,21 @@ out: * * This function will release the vma lock before returning. */ -void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep) +void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep); + softleaf_t entry; pte_t pte; hugetlb_vma_assert_locked(vma); spin_lock(ptl); - pte = huge_ptep_get(ptep); + pte = huge_ptep_get(vma->vm_mm, addr, ptep); - if (unlikely(!is_hugetlb_entry_migration(pte))) { - spin_unlock(ptl); - hugetlb_vma_unlock_read(vma); - } else { + if (huge_pte_none(pte)) + goto fail; + + entry = softleaf_from_pte(pte); + if (softleaf_is_migration(entry)) { /* * If migration entry existed, safe to release vma lock * here because the pgtable page won't be freed without the @@ -352,8 +534,13 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep) * lock release in migration_entry_wait_on_locked(). 
*/ hugetlb_vma_unlock_read(vma); - migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl); + migration_entry_wait_on_locked(entry, ptl); + return; } + +fail: + spin_unlock(ptl); + hugetlb_vma_unlock_read(vma); } #endif @@ -363,112 +550,120 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) spinlock_t *ptl; ptl = pmd_lock(mm, pmd); - if (!is_pmd_migration_entry(*pmd)) + if (!pmd_is_migration_entry(*pmd)) goto unlock; - migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl); + migration_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl); return; unlock: spin_unlock(ptl); } #endif -static int folio_expected_refs(struct address_space *mapping, - struct folio *folio) -{ - int refs = 1; - if (!mapping) - return refs; - - refs += folio_nr_pages(folio); - if (folio_test_private(folio)) - refs++; - - return refs; -} - /* - * Replace the page in the mapping. + * Replace the folio in the mapping. * * The number of remaining references must be: - * 1 for anonymous pages without a mapping - * 2 for pages with a mapping - * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. + * 1 for anonymous folios without a mapping + * 2 for folios with a mapping + * 3 for folios with a mapping and the private flag set. */ -int folio_migrate_mapping(struct address_space *mapping, - struct folio *newfolio, struct folio *folio, int extra_count) +static int __folio_migrate_mapping(struct address_space *mapping, + struct folio *newfolio, struct folio *folio, int expected_count) { - XA_STATE(xas, &mapping->i_pages, folio_index(folio)); + XA_STATE(xas, &mapping->i_pages, folio->index); + struct swap_cluster_info *ci = NULL; struct zone *oldzone, *newzone; int dirty; - int expected_count = folio_expected_refs(mapping, folio) + extra_count; long nr = folio_nr_pages(folio); if (!mapping) { - /* Anonymous page without mapping */ - if (folio_ref_count(folio) != expected_count) - return -EAGAIN; + /* Take off deferred split queue while frozen and memcg set */ + if (folio_test_large(folio) && + folio_test_large_rmappable(folio)) { + if (!folio_ref_freeze(folio, expected_count)) + return -EAGAIN; + folio_unqueue_deferred_split(folio); + folio_ref_unfreeze(folio, expected_count); + } /* No turning back from here */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); - return MIGRATEPAGE_SUCCESS; + return 0; } oldzone = folio_zone(folio); newzone = folio_zone(newfolio); - xas_lock_irq(&xas); + if (folio_test_swapcache(folio)) + ci = swap_cluster_get_and_lock_irq(folio); + else + xas_lock_irq(&xas); + if (!folio_ref_freeze(folio, expected_count)) { - xas_unlock_irq(&xas); + if (ci) + swap_cluster_unlock_irq(ci); + else + xas_unlock_irq(&xas); return -EAGAIN; } + /* Take off deferred split queue while frozen and memcg set */ + folio_unqueue_deferred_split(folio); + /* * Now we know that no one else is looking at the folio: * no turning back from here. 
*/ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); folio_ref_add(newfolio, nr); /* add cache reference */ - if (folio_test_swapbacked(folio)) { + if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); - if (folio_test_swapcache(folio)) { - folio_set_swapcache(newfolio); - newfolio->private = folio_get_private(folio); - } - } else { - VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); + if (folio_test_swapcache(folio)) { + folio_set_swapcache(newfolio); + newfolio->private = folio_get_private(folio); } - /* Move dirty while page refs frozen and newpage not yet exposed */ + /* Move dirty while folio refs frozen and newfolio not yet exposed */ dirty = folio_test_dirty(folio); if (dirty) { folio_clear_dirty(folio); folio_set_dirty(newfolio); } - xas_store(&xas, newfolio); + if (folio_test_swapcache(folio)) + __swap_cache_replace_folio(ci, folio, newfolio); + else + xas_store(&xas, newfolio); /* - * Drop cache reference from old page by unfreezing + * Drop cache reference from old folio by unfreezing * to one less reference. * We know this isn't the last reference. */ folio_ref_unfreeze(folio, expected_count - nr); - xas_unlock(&xas); /* Leave irq disabled to prevent preemption while updating stats */ + if (ci) + swap_cluster_unlock(ci); + else + xas_unlock(&xas); /* * If moved to a different zone then also account - * the page for that zone. Other VM counters will be + * the folio for that zone. Other VM counters will be * taken care of when we establish references to the - * new page and drop references to the old page. + * new folio and drop references to the old folio. * - * Note that anonymous pages are accounted for + * Note that anonymous folios are accounted for * via NR_FILE_PAGES and NR_ANON_MAPPED if they * are mapped to swap space. 
*/ @@ -480,33 +675,44 @@ int folio_migrate_mapping(struct address_space *mapping, old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); - __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); - __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); + mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); + mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { - __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); - __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); + mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); + mod_lruvec_state(new_lruvec, NR_SHMEM, nr); if (folio_test_pmd_mappable(folio)) { - __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr); - __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr); + mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr); + mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr); } } #ifdef CONFIG_SWAP if (folio_test_swapcache(folio)) { - __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); - __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); + mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); + mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); } #endif if (dirty && mapping_can_writeback(mapping)) { - __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); + mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); - __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); + mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); } } local_irq_enable(); - return MIGRATEPAGE_SUCCESS; + return 0; +} + +int folio_migrate_mapping(struct address_space *mapping, + struct folio *newfolio, struct folio *folio, int extra_count) +{ + int expected_count = folio_expected_ref_count(folio) + extra_count + 1; + + if (folio_ref_count(folio) != expected_count) + return -EAGAIN; + + return __folio_migrate_mapping(mapping, newfolio, folio, expected_count); } EXPORT_SYMBOL(folio_migrate_mapping); @@ -517,11 +723,17 @@ EXPORT_SYMBOL(folio_migrate_mapping); int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src) { - XA_STATE(xas, &mapping->i_pages, folio_index(src)); - int expected_count; + XA_STATE(xas, &mapping->i_pages, src->index); + int rc, expected_count = folio_expected_ref_count(src) + 1; + + if (folio_ref_count(src) != expected_count) + return -EAGAIN; + + rc = folio_mc_copy(dst, src); + if (unlikely(rc)) + return rc; xas_lock_irq(&xas); - expected_count = 2 + folio_has_private(src); if (!folio_ref_freeze(src, expected_count)) { xas_unlock_irq(&xas); return -EAGAIN; @@ -530,15 +742,15 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, dst->index = src->index; dst->mapping = src->mapping; - folio_get(dst); + folio_ref_add(dst, folio_nr_pages(dst)); xas_store(&xas, dst); - folio_ref_unfreeze(src, expected_count - 1); + folio_ref_unfreeze(src, expected_count - folio_nr_pages(src)); xas_unlock_irq(&xas); - return MIGRATEPAGE_SUCCESS; + return 0; } /* @@ -548,8 +760,6 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) { int cpupid; - if (folio_test_error(folio)) - folio_set_error(newfolio); if (folio_test_referenced(folio)) folio_set_referenced(newfolio); if (folio_test_uptodate(folio)) @@ -581,29 +791,31 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) if (folio_test_idle(folio)) folio_set_idle(newfolio); + folio_migrate_refs(newfolio, folio); /* * Copy NUMA information to the new 
page, to prevent over-eager * future migrations of this same page. */ - cpupid = page_cpupid_xchg_last(&folio->page, -1); + cpupid = folio_xchg_last_cpupid(folio, -1); /* * For memory tiering mode, when migrate between slow and fast * memory node, reset cpupid, because that is used to record * page access time in slow memory node. */ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) { - bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); - bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page)); + bool f_toptier = node_is_toptier(folio_nid(folio)); + bool t_toptier = node_is_toptier(folio_nid(newfolio)); if (f_toptier != t_toptier) cpupid = -1; } - page_cpupid_xchg_last(&newfolio->page, cpupid); + folio_xchg_last_cpupid(newfolio, cpupid); folio_migrate_ksm(newfolio, folio); /* * Please do not reorder this without considering how mm/ksm.c's - * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). + * ksm_get_folio() depends upon ksm_migrate_page() and the + * swapcache flag. */ if (folio_test_swapcache(folio)) folio_clear_swapcache(folio); @@ -629,40 +841,39 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) folio_set_readahead(newfolio); folio_copy_owner(newfolio, folio); + pgalloc_tag_swap(newfolio, folio); - if (!folio_test_hugetlb(folio)) - mem_cgroup_migrate(folio, newfolio); + mem_cgroup_migrate(folio, newfolio); } EXPORT_SYMBOL(folio_migrate_flags); -void folio_migrate_copy(struct folio *newfolio, struct folio *folio) -{ - folio_copy(newfolio, folio); - folio_migrate_flags(newfolio, folio); -} -EXPORT_SYMBOL(folio_migrate_copy); - /************************************************************ * Migration functions ***********************************************************/ -int migrate_folio_extra(struct address_space *mapping, struct folio *dst, - struct folio *src, enum migrate_mode mode, int extra_count) +static int __migrate_folio(struct address_space *mapping, struct folio *dst, + struct folio *src, void *src_private, + enum migrate_mode mode) { - int rc; + int rc, expected_count = folio_expected_ref_count(src) + 1; - BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ + /* Check whether src does not have extra refs before we do more work */ + if (folio_ref_count(src) != expected_count) + return -EAGAIN; - rc = folio_migrate_mapping(mapping, dst, src, extra_count); + rc = folio_mc_copy(dst, src); + if (unlikely(rc)) + return rc; - if (rc != MIGRATEPAGE_SUCCESS) + rc = __folio_migrate_mapping(mapping, dst, src, expected_count); + if (rc) return rc; - if (mode != MIGRATE_SYNC_NO_COPY) - folio_migrate_copy(dst, src); - else - folio_migrate_flags(dst, src); - return MIGRATEPAGE_SUCCESS; + if (src_private) + folio_attach_private(dst, folio_detach_private(src)); + + folio_migrate_flags(dst, src); + return 0; } /** @@ -673,14 +884,15 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst, * @mode: How to migrate the page. * * Common logic to directly migrate a single LRU folio suitable for - * folios that do not use PagePrivate/PagePrivate2. + * folios that do not have private data. * * Folios are locked upon entry and exit. 
*/ int migrate_folio(struct address_space *mapping, struct folio *dst, - struct folio *src, enum migrate_mode mode) + struct folio *src, enum migrate_mode mode) { - return migrate_folio_extra(mapping, dst, src, mode, 0); + BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ + return __migrate_folio(mapping, dst, src, NULL, mode); } EXPORT_SYMBOL(migrate_folio); @@ -731,7 +943,7 @@ static int __buffer_migrate_folio(struct address_space *mapping, return migrate_folio(mapping, dst, src, mode); /* Check whether page does not have extra refs before we do more work */ - expected_count = folio_expected_refs(mapping, src); + expected_count = folio_expected_ref_count(src) + 1; if (folio_ref_count(src) != expected_count) return -EAGAIN; @@ -739,12 +951,14 @@ static int __buffer_migrate_folio(struct address_space *mapping, return -EAGAIN; if (check_refs) { - bool busy; + bool busy, migrating; bool invalidated = false; + migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state); + VM_WARN_ON_ONCE(migrating); recheck_buffers: busy = false; - spin_lock(&mapping->private_lock); + spin_lock(&mapping->i_private_lock); bh = head; do { if (atomic_read(&bh->b_count)) { @@ -753,39 +967,31 @@ recheck_buffers: } bh = bh->b_this_page; } while (bh != head); + spin_unlock(&mapping->i_private_lock); if (busy) { if (invalidated) { rc = -EAGAIN; goto unlock_buffers; } - spin_unlock(&mapping->private_lock); invalidate_bh_lrus(); invalidated = true; goto recheck_buffers; } } - rc = folio_migrate_mapping(mapping, dst, src, 0); - if (rc != MIGRATEPAGE_SUCCESS) + rc = filemap_migrate_folio(mapping, dst, src, mode); + if (rc) goto unlock_buffers; - folio_attach_private(dst, folio_detach_private(src)); - bh = head; do { folio_set_bh(bh, dst, bh_offset(bh)); bh = bh->b_this_page; } while (bh != head); - if (mode != MIGRATE_SYNC_NO_COPY) - folio_migrate_copy(dst, src); - else - folio_migrate_flags(dst, src); - - rc = MIGRATEPAGE_SUCCESS; unlock_buffers: if (check_refs) - spin_unlock(&mapping->private_lock); + clear_bit_unlock(BH_Migrate, &head->b_state); bh = head; do { unlock_buffer(bh); @@ -842,85 +1048,25 @@ EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs); int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { - int ret; - - ret = folio_migrate_mapping(mapping, dst, src, 0); - if (ret != MIGRATEPAGE_SUCCESS) - return ret; - - if (folio_get_private(src)) - folio_attach_private(dst, folio_detach_private(src)); - - if (mode != MIGRATE_SYNC_NO_COPY) - folio_migrate_copy(dst, src); - else - folio_migrate_flags(dst, src); - return MIGRATEPAGE_SUCCESS; + return __migrate_folio(mapping, dst, src, folio_get_private(src), mode); } EXPORT_SYMBOL_GPL(filemap_migrate_folio); /* - * Writeback a folio to clean the dirty state - */ -static int writeout(struct address_space *mapping, struct folio *folio) -{ - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .nr_to_write = 1, - .range_start = 0, - .range_end = LLONG_MAX, - .for_reclaim = 1 - }; - int rc; - - if (!mapping->a_ops->writepage) - /* No write method for the address space */ - return -EINVAL; - - if (!folio_clear_dirty_for_io(folio)) - /* Someone else already triggered a write */ - return -EAGAIN; - - /* - * A dirty folio may imply that the underlying filesystem has - * the folio on some queue. So the folio must be clean for - * migration. Writeout may mean we lose the lock and the - * folio state is no longer what we checked for earlier. 
- * At this point we know that the migration attempt cannot - * be successful. - */ - remove_migration_ptes(folio, folio, false); - - rc = mapping->a_ops->writepage(&folio->page, &wbc); - - if (rc != AOP_WRITEPAGE_ACTIVATE) - /* unlocked. Relock */ - folio_lock(folio); - - return (rc < 0) ? -EIO : -EAGAIN; -} - -/* * Default handling if a filesystem does not provide a migration function. */ static int fallback_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { - if (folio_test_dirty(src)) { - /* Only writeback folios in full synchronous migration */ - switch (mode) { - case MIGRATE_SYNC: - case MIGRATE_SYNC_NO_COPY: - break; - default: - return -EBUSY; - } - return writeout(mapping, src); - } + WARN_ONCE(mapping->a_ops->writepages, + "%ps does not implement migrate_folio\n", + mapping->a_ops); + if (folio_test_dirty(src)) + return -EBUSY; /* - * Buffers may be managed in a filesystem specific way. - * We must have no buffers or drop them. + * Filesystem may have private data at folio->private that we + * can't migrate automatically. */ if (!filemap_release_folio(src, GFP_KERNEL)) return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; @@ -929,119 +1075,83 @@ static int fallback_migrate_folio(struct address_space *mapping, } /* - * Move a page to a newly allocated page - * The page is locked and all ptes have been successfully removed. + * Move a src folio to a newly allocated dst folio. + * + * The src and dst folios are locked and the src folios was unmapped from + * the page tables. * - * The new page will have replaced the old page if this function - * is successful. + * On success, the src folio was replaced by the dst folio. * * Return value: * < 0 - error code - * MIGRATEPAGE_SUCCESS - success + * 0 - success */ static int move_to_new_folio(struct folio *dst, struct folio *src, enum migrate_mode mode) { + struct address_space *mapping = folio_mapping(src); int rc = -EAGAIN; - bool is_lru = !__PageMovable(&src->page); VM_BUG_ON_FOLIO(!folio_test_locked(src), src); VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); - if (likely(is_lru)) { - struct address_space *mapping = folio_mapping(src); - - if (!mapping) - rc = migrate_folio(mapping, dst, src, mode); - else if (mapping->a_ops->migrate_folio) - /* - * Most folios have a mapping and most filesystems - * provide a migrate_folio callback. Anonymous folios - * are part of swap space which also has its own - * migrate_folio callback. This is the most common path - * for page migration. - */ - rc = mapping->a_ops->migrate_folio(mapping, dst, src, - mode); - else - rc = fallback_migrate_folio(mapping, dst, src, mode); - } else { - const struct movable_operations *mops; - + if (!mapping) + rc = migrate_folio(mapping, dst, src, mode); + else if (mapping_inaccessible(mapping)) + rc = -EOPNOTSUPP; + else if (mapping->a_ops->migrate_folio) /* - * In case of non-lru page, it could be released after - * isolation step. In that case, we shouldn't try migration. + * Most folios have a mapping and most filesystems + * provide a migrate_folio callback. Anonymous folios + * are part of swap space which also has its own + * migrate_folio callback. This is the most common path + * for page migration. 
*/ - VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); - if (!folio_test_movable(src)) { - rc = MIGRATEPAGE_SUCCESS; - folio_clear_isolated(src); - goto out; - } - - mops = folio_movable_ops(src); - rc = mops->migrate_page(&dst->page, &src->page, mode); - WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && - !folio_test_isolated(src)); - } - - /* - * When successful, old pagecache src->mapping must be cleared before - * src is freed; but stats require that PageAnon be left as PageAnon. - */ - if (rc == MIGRATEPAGE_SUCCESS) { - if (__PageMovable(&src->page)) { - VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); - - /* - * We clear PG_movable under page_lock so any compactor - * cannot try to migrate this page. - */ - folio_clear_isolated(src); - } + rc = mapping->a_ops->migrate_folio(mapping, dst, src, + mode); + else + rc = fallback_migrate_folio(mapping, dst, src, mode); + if (!rc) { /* - * Anonymous and movable src->mapping will be cleared by - * free_pages_prepare so don't reset it here for keeping - * the type to work PageAnon, for example. + * For pagecache folios, src->mapping must be cleared before src + * is freed. Anonymous folios must stay anonymous until freed. */ - if (!folio_mapping_flags(src)) + if (!folio_test_anon(src)) src->mapping = NULL; if (likely(!folio_is_zone_device(dst))) flush_dcache_folio(dst); } -out: return rc; } /* - * To record some information during migration, we use some unused - * fields (mapping and private) of struct folio of the newly allocated - * destination folio. This is safe because nobody is using them - * except us. + * To record some information during migration, we use unused private + * field of struct folio of the newly allocated destination folio. + * This is safe because nobody is using it except us. */ -union migration_ptr { - struct anon_vma *anon_vma; - struct address_space *mapping; +enum { + PAGE_WAS_MAPPED = BIT(0), + PAGE_WAS_MLOCKED = BIT(1), + PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED, }; + static void __migrate_folio_record(struct folio *dst, - unsigned long page_was_mapped, + int old_page_state, struct anon_vma *anon_vma) { - union migration_ptr ptr = { .anon_vma = anon_vma }; - dst->mapping = ptr.mapping; - dst->private = (void *)page_was_mapped; + dst->private = (void *)anon_vma + old_page_state; } static void __migrate_folio_extract(struct folio *dst, - int *page_was_mappedp, + int *old_page_state, struct anon_vma **anon_vmap) { - union migration_ptr ptr = { .mapping = dst->mapping }; - *anon_vmap = ptr.anon_vma; - *page_was_mappedp = (unsigned long)dst->private; - dst->mapping = NULL; + unsigned long private = (unsigned long)dst->private; + + *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES); + *old_page_state = private & PAGE_OLD_STATES; dst->private = NULL; } @@ -1053,7 +1163,7 @@ static void migrate_folio_undo_src(struct folio *src, struct list_head *ret) { if (page_was_mapped) - remove_migration_ptes(src, src, false); + remove_migration_ptes(src, src, 0); /* Drop an anon_vma reference if we took one */ if (anon_vma) put_anon_vma(anon_vma); @@ -1079,12 +1189,7 @@ static void migrate_folio_undo_dst(struct folio *dst, bool locked, static void migrate_folio_done(struct folio *src, enum migrate_reason reason) { - /* - * Compaction can migrate also non-LRU pages which are - * not accounted to NR_ISOLATED_*. 
They can be recognized - * as __PageMovable - */ - if (likely(!__folio_test_movable(src))) + if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION) mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON + folio_is_file_lru(src), -folio_nr_pages(src)); @@ -1097,26 +1202,15 @@ static void migrate_folio_done(struct folio *src, static int migrate_folio_unmap(new_folio_t get_new_folio, free_folio_t put_new_folio, unsigned long private, struct folio *src, struct folio **dstp, enum migrate_mode mode, - enum migrate_reason reason, struct list_head *ret) + struct list_head *ret) { struct folio *dst; int rc = -EAGAIN; - int page_was_mapped = 0; + int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__PageMovable(&src->page); bool locked = false; bool dst_locked = false; - if (folio_ref_count(src) == 1) { - /* Folio was freed from under us. So we are done. */ - folio_clear_active(src); - folio_clear_unevictable(src); - /* free_pages_prepare() will clear PG_isolated. */ - list_del(&src->lru); - migrate_folio_done(src, reason); - return MIGRATEPAGE_SUCCESS; - } - dst = get_new_folio(src, private); if (!dst) return -ENOMEM; @@ -1155,6 +1249,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, folio_lock(src); } locked = true; + if (folio_test_mlocked(src)) + old_page_state |= PAGE_WAS_MLOCKED; if (folio_test_writeback(src)) { /* @@ -1165,7 +1261,6 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, */ switch (mode) { case MIGRATE_SYNC: - case MIGRATE_SYNC_NO_COPY: break; default: rc = -EBUSY; @@ -1203,9 +1298,9 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, goto out; dst_locked = true; - if (unlikely(!is_lru)) { - __migrate_folio_record(dst, page_was_mapped, anon_vma); - return MIGRATEPAGE_UNMAP; + if (unlikely(page_has_movable_ops(&src->page))) { + __migrate_folio_record(dst, old_page_state, anon_vma); + return 0; } /* @@ -1230,12 +1325,12 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, VM_BUG_ON_FOLIO(folio_test_anon(src) && !folio_test_ksm(src) && !anon_vma, src); try_to_migrate(src, mode == MIGRATE_ASYNC ? 
TTU_BATCH_FLUSH : 0); - page_was_mapped = 1; + old_page_state |= PAGE_WAS_MAPPED; } if (!folio_mapped(src)) { - __migrate_folio_record(dst, page_was_mapped, anon_vma); - return MIGRATEPAGE_UNMAP; + __migrate_folio_record(dst, old_page_state, anon_vma); + return 0; } out: @@ -1246,7 +1341,8 @@ out: if (rc == -EAGAIN) ret = NULL; - migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret); + migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED, + anon_vma, locked, ret); migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private); return rc; @@ -1259,22 +1355,25 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, struct list_head *ret) { int rc; - int page_was_mapped = 0; + int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__PageMovable(&src->page); struct list_head *prev; - __migrate_folio_extract(dst, &page_was_mapped, &anon_vma); + __migrate_folio_extract(dst, &old_page_state, &anon_vma); prev = dst->lru.prev; list_del(&dst->lru); + if (unlikely(page_has_movable_ops(&src->page))) { + rc = migrate_movable_ops_page(&dst->page, &src->page, mode); + if (rc) + goto out; + goto out_unlock_both; + } + rc = move_to_new_folio(dst, src, mode); if (rc) goto out; - if (unlikely(!is_lru)) - goto out_unlock_both; - /* * When successful, push dst to LRU immediately: so that if it * turns out to be an mlocked page, remove_migration_ptes() will @@ -1285,15 +1384,15 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, * isolated from the unevictable LRU: but this case is the easiest. */ folio_add_lru(dst); - if (page_was_mapped) + if (old_page_state & PAGE_WAS_MLOCKED) lru_add_drain(); - if (page_was_mapped) - remove_migration_ptes(src, dst, false); + if (old_page_state & PAGE_WAS_MAPPED) + remove_migration_ptes(src, dst, 0); out_unlock_both: folio_unlock(dst); - set_page_owner_migrate_reason(&dst->page, reason); + folio_set_owner_migrate_reason(dst, reason); /* * If migration is successful, decrease refcount of dst, * which will not free the page because new page owner increased @@ -1320,11 +1419,12 @@ out: */ if (rc == -EAGAIN) { list_add(&dst->lru, prev); - __migrate_folio_record(dst, page_was_mapped, anon_vma); + __migrate_folio_record(dst, old_page_state, anon_vma); return rc; } - migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret); + migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED, + anon_vma, true, ret); migrate_folio_undo_dst(dst, true, put_new_folio, private); return rc; @@ -1361,8 +1461,8 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio, if (folio_ref_count(src) == 1) { /* page was freed from under us. So we are done. */ - folio_putback_active_hugetlb(src); - return MIGRATEPAGE_SUCCESS; + folio_putback_hugetlb(src); + return 0; } dst = get_new_folio(src, private); @@ -1374,7 +1474,6 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio, goto out; switch (mode) { case MIGRATE_SYNC: - case MIGRATE_SYNC_NO_COPY: break; default: goto out; @@ -1408,7 +1507,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio, * semaphore in write mode here and set TTU_RMAP_LOCKED * to let lower levels know we have taken the lock. 
*/ - mapping = hugetlb_page_mapping_lock_write(&src->page); + mapping = hugetlb_folio_mapping_lock_write(src); if (unlikely(!mapping)) goto unlock_put_anon; @@ -1426,8 +1525,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio, rc = move_to_new_folio(dst, src, mode); if (page_was_mapped) - remove_migration_ptes(src, - rc == MIGRATEPAGE_SUCCESS ? dst : src, false); + remove_migration_ptes(src, !rc ? dst : src, 0); unlock_put_anon: folio_unlock(dst); @@ -1436,7 +1534,7 @@ put_anon: if (anon_vma) put_anon_vma(anon_vma); - if (rc == MIGRATEPAGE_SUCCESS) { + if (!rc) { move_hugetlb_state(src, dst, reason); put_new_folio = NULL; } @@ -1444,29 +1542,35 @@ put_anon: out_unlock: folio_unlock(src); out: - if (rc == MIGRATEPAGE_SUCCESS) - folio_putback_active_hugetlb(src); + if (!rc) + folio_putback_hugetlb(src); else if (rc != -EAGAIN) list_move_tail(&src->lru, ret); /* - * If migration was not successful and there's a freeing callback, use - * it. Otherwise, put_page() will drop the reference grabbed during - * isolation. + * If migration was not successful and there's a freeing callback, + * return the folio to that special allocator. Otherwise, simply drop + * our additional reference. */ if (put_new_folio) put_new_folio(dst, private); else - folio_putback_active_hugetlb(dst); + folio_put(dst); return rc; } -static inline int try_split_folio(struct folio *folio, struct list_head *split_folios) +static inline int try_split_folio(struct folio *folio, struct list_head *split_folios, + enum migrate_mode mode) { int rc; - folio_lock(folio); + if (mode == MIGRATE_ASYNC) { + if (!folio_trylock(folio)) + return -EAGAIN; + } else { + folio_lock(folio); + } rc = split_folio_to_list(folio, split_folios); folio_unlock(folio); if (!rc) @@ -1493,6 +1597,7 @@ struct migrate_pages_stats { int nr_thp_succeeded; /* THP migrated successfully */ int nr_thp_failed; /* THP failed to be migrated */ int nr_thp_split; /* THP split before migrating */ + int nr_split; /* Large folio (include THP) split before migrating */ }; /* @@ -1547,7 +1652,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio, reason, ret_folios); /* * The rules are: - * Success: hugetlb folio will be put back + * 0: hugetlb folio will be put back * -EAGAIN: stay on the from list * -ENOMEM: stay on the from list * Other errno: put on ret_folios list @@ -1564,7 +1669,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio, retry++; nr_retry_pages += nr_pages; break; - case MIGRATEPAGE_SUCCESS: + case 0: stats->nr_succeeded += nr_pages; break; default: @@ -1591,6 +1696,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio, return nr_failed; } +static void migrate_folios_move(struct list_head *src_folios, + struct list_head *dst_folios, + free_folio_t put_new_folio, unsigned long private, + enum migrate_mode mode, int reason, + struct list_head *ret_folios, + struct migrate_pages_stats *stats, + int *retry, int *thp_retry, int *nr_failed, + int *nr_retry_pages) +{ + struct folio *folio, *folio2, *dst, *dst2; + bool is_thp; + int nr_pages; + int rc; + + dst = list_first_entry(dst_folios, struct folio, lru); + dst2 = list_next_entry(dst, lru); + list_for_each_entry_safe(folio, folio2, src_folios, lru) { + is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); + nr_pages = folio_nr_pages(folio); + + cond_resched(); + + rc = migrate_folio_move(put_new_folio, private, + folio, dst, mode, + reason, ret_folios); + /* + * The rules are: + * 0: folio 
will be freed + * -EAGAIN: stay on the unmap_folios list + * Other errno: put on ret_folios list + */ + switch (rc) { + case -EAGAIN: + *retry += 1; + *thp_retry += is_thp; + *nr_retry_pages += nr_pages; + break; + case 0: + stats->nr_succeeded += nr_pages; + stats->nr_thp_succeeded += is_thp; + break; + default: + *nr_failed += 1; + stats->nr_thp_failed += is_thp; + stats->nr_failed_pages += nr_pages; + break; + } + dst = dst2; + dst2 = list_next_entry(dst, lru); + } +} + +static void migrate_folios_undo(struct list_head *src_folios, + struct list_head *dst_folios, + free_folio_t put_new_folio, unsigned long private, + struct list_head *ret_folios) +{ + struct folio *folio, *folio2, *dst, *dst2; + + dst = list_first_entry(dst_folios, struct folio, lru); + dst2 = list_next_entry(dst, lru); + list_for_each_entry_safe(folio, folio2, src_folios, lru) { + int old_page_state = 0; + struct anon_vma *anon_vma = NULL; + + __migrate_folio_extract(dst, &old_page_state, &anon_vma); + migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED, + anon_vma, true, ret_folios); + list_del(&dst->lru); + migrate_folio_undo_dst(dst, true, put_new_folio, private); + dst = dst2; + dst2 = list_next_entry(dst, lru); + } +} + /* * migrate_pages_batch() first unmaps folios in the from list as many as * possible, then move the unmapped folios. @@ -1612,7 +1792,8 @@ static int migrate_pages_batch(struct list_head *from, int nr_retry_pages = 0; int pass = 0; bool is_thp = false; - struct folio *folio, *folio2, *dst = NULL, *dst2; + bool is_large = false; + struct folio *folio, *folio2, *dst = NULL; int rc, rc_saved = 0, nr_pages; LIST_HEAD(unmap_folios); LIST_HEAD(dst_folios); @@ -1627,12 +1808,48 @@ static int migrate_pages_batch(struct list_head *from, nr_retry_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { - is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); + is_large = folio_test_large(folio); + is_thp = folio_test_pmd_mappable(folio); nr_pages = folio_nr_pages(folio); cond_resched(); /* + * The rare folio on the deferred split list should + * be split now. It should not count as a failure: + * but increment nr_failed because, without doing so, + * migrate_pages() may report success with (split but + * unmigrated) pages still on its fromlist; whereas it + * always reports success when its fromlist is empty. + * stats->nr_thp_failed should be increased too, + * otherwise stats inconsistency will happen when + * migrate_pages_batch is called via migrate_pages() + * with MIGRATE_SYNC and MIGRATE_ASYNC. + * + * Only check it without removing it from the list. + * Since the folio can be on deferred_split_scan() + * local list and removing it can cause the local list + * corruption. Folio split process below can handle it + * with the help of folio_ref_freeze(). + * + * nr_pages > 2 is needed to avoid checking order-1 + * page cache folios. They exist, in contrast to + * non-existent order-1 anonymous folios, and do not + * use _deferred_list. 
+ */ + if (nr_pages > 2 && + !list_empty(&folio->_deferred_list) && + folio_test_partially_mapped(folio)) { + if (!try_split_folio(folio, split_folios, mode)) { + nr_failed++; + stats->nr_thp_failed += is_thp; + stats->nr_thp_split += is_thp; + stats->nr_split++; + continue; + } + } + + /* * Large folio migration might be unsupported or * the allocation might be failed so we should retry * on the same folio with the large folio split @@ -1645,8 +1862,9 @@ static int migrate_pages_batch(struct list_head *from, if (!thp_migration_supported() && is_thp) { nr_failed++; stats->nr_thp_failed++; - if (!try_split_folio(folio, split_folios)) { + if (!try_split_folio(folio, split_folios, mode)) { stats->nr_thp_split++; + stats->nr_split++; continue; } stats->nr_failed_pages += nr_pages; @@ -1654,14 +1872,27 @@ static int migrate_pages_batch(struct list_head *from, continue; } + /* + * If we are holding the last folio reference, the folio + * was freed from under us, so just drop our reference. + */ + if (likely(!page_has_movable_ops(&folio->page)) && + folio_ref_count(folio) == 1) { + folio_clear_active(folio); + folio_clear_unevictable(folio); + list_del(&folio->lru); + migrate_folio_done(folio, reason); + stats->nr_succeeded += nr_pages; + stats->nr_thp_succeeded += is_thp; + continue; + } + rc = migrate_folio_unmap(get_new_folio, put_new_folio, - private, folio, &dst, mode, reason, - ret_folios); + private, folio, &dst, mode, ret_folios); /* * The rules are: - * Success: folio will be freed - * Unmap: folio will be put on unmap_folios list, - * dst folio put on dst_folios list + * 0: folio will be put on unmap_folios list, + * dst folio put on dst_folios list * -EAGAIN: stay on the from list * -ENOMEM: stay on the from list * Other errno: put on ret_folios list @@ -1675,11 +1906,12 @@ static int migrate_pages_batch(struct list_head *from, nr_failed++; stats->nr_thp_failed += is_thp; /* Large folio NUMA faulting doesn't split to retry. 
 			 */
-			if (folio_test_large(folio) && !nosplit) {
-				int ret = try_split_folio(folio, split_folios);
+			if (is_large && !nosplit) {
+				int ret = try_split_folio(folio, split_folios, mode);
 				if (!ret) {
 					stats->nr_thp_split += is_thp;
+					stats->nr_split++;
 					break;
 				} else if (reason == MR_LONGTERM_PIN &&
 					   ret == -EAGAIN) {
@@ -1710,11 +1942,7 @@ static int migrate_pages_batch(struct list_head *from,
 				thp_retry += is_thp;
 				nr_retry_pages += nr_pages;
 				break;
-			case MIGRATEPAGE_SUCCESS:
-				stats->nr_succeeded += nr_pages;
-				stats->nr_thp_succeeded += is_thp;
-				break;
-			case MIGRATEPAGE_UNMAP:
+			case 0:
 				list_move_tail(&folio->lru, &unmap_folios);
 				list_add_tail(&dst->lru, &dst_folios);
 				break;
@@ -1745,42 +1973,11 @@ move:
 		thp_retry = 0;
 		nr_retry_pages = 0;

-		dst = list_first_entry(&dst_folios, struct folio, lru);
-		dst2 = list_next_entry(dst, lru);
-		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
-			nr_pages = folio_nr_pages(folio);
-
-			cond_resched();
-
-			rc = migrate_folio_move(put_new_folio, private,
-						folio, dst, mode,
-						reason, ret_folios);
-			/*
-			 * The rules are:
-			 *	Success: folio will be freed
-			 *	-EAGAIN: stay on the unmap_folios list
-			 *	Other errno: put on ret_folios list
-			 */
-			switch(rc) {
-			case -EAGAIN:
-				retry++;
-				thp_retry += is_thp;
-				nr_retry_pages += nr_pages;
-				break;
-			case MIGRATEPAGE_SUCCESS:
-				stats->nr_succeeded += nr_pages;
-				stats->nr_thp_succeeded += is_thp;
-				break;
-			default:
-				nr_failed++;
-				stats->nr_thp_failed += is_thp;
-				stats->nr_failed_pages += nr_pages;
-				break;
-			}
-			dst = dst2;
-			dst2 = list_next_entry(dst, lru);
-		}
+		/* Move the unmapped folios */
+		migrate_folios_move(&unmap_folios, &dst_folios,
+				put_new_folio, private, mode, reason,
+				ret_folios, stats, &retry, &thp_retry,
+				&nr_failed, &nr_retry_pages);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
@@ -1789,20 +1986,8 @@ move:
 	rc = rc_saved ? : nr_failed;
 out:
 	/* Cleanup remaining folios */
-	dst = list_first_entry(&dst_folios, struct folio, lru);
-	dst2 = list_next_entry(dst, lru);
-	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-		int page_was_mapped = 0;
-		struct anon_vma *anon_vma = NULL;
-
-		__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
-		migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
-				       true, ret_folios);
-		list_del(&dst->lru);
-		migrate_folio_undo_dst(dst, true, put_new_folio, private);
-		dst = dst2;
-		dst2 = list_next_entry(dst, lru);
-	}
+	migrate_folios_undo(&unmap_folios, &dst_folios,
+			put_new_folio, private, ret_folios);

 	return rc;
 }
@@ -1825,6 +2010,7 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
 	stats->nr_succeeded += astats.nr_succeeded;
 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
 	stats->nr_thp_split += astats.nr_thp_split;
+	stats->nr_split += astats.nr_split;
 	if (rc < 0) {
 		stats->nr_failed_pages += astats.nr_failed_pages;
 		stats->nr_thp_failed += astats.nr_thp_failed;
@@ -1832,7 +2018,11 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
 		return rc;
 	}
 	stats->nr_thp_failed += astats.nr_thp_split;
-	nr_failed += astats.nr_thp_split;
+	/*
+	 * Do not count rc, as pages will be retried below.
+	 * Count nr_split only, since it includes nr_thp_split.
+	 */
+	nr_failed += astats.nr_split;
 	/*
 	 * Fall back to migrate all failed folios one by one synchronously. All
 	 * failed folios except split THPs will be retried, so their failure
@@ -1967,7 +2157,8 @@ out:
 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
-			       stats.nr_thp_split, mode, reason);
+			       stats.nr_thp_split, stats.nr_split, mode,
+			       reason);

 	if (ret_succeeded)
 		*ret_succeeded = stats.nr_succeeded;
@@ -1981,7 +2172,7 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)
 	gfp_t gfp_mask;
 	unsigned int order = 0;
 	int nid;
-	int zidx;
+	enum zone_type zidx;

 	mtc = (struct migration_target_control *)private;
 	gfp_mask = mtc->gfp_mask;
@@ -1994,7 +2185,8 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)

 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
 		return alloc_hugetlb_folio_nodemask(h, nid,
-						mtc->nmask, gfp_mask);
+						mtc->nmask, gfp_mask,
+						htlb_allow_alloc_fallback(mtc->reason));
 	}

 	if (folio_test_large(src)) {
@@ -2006,7 +2198,7 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)
 		gfp_mask |= GFP_TRANSHUGE;
 		order = folio_order(src);
 	}
-	zidx = zone_idx(folio_zone(src));
+	zidx = folio_zonenum(src);
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;

@@ -2026,13 +2218,13 @@ static int store_status(int __user *status, int start, int value, int nr)
 	return 0;
 }

-static int do_move_pages_to_node(struct mm_struct *mm,
-		struct list_head *pagelist, int node)
+static int do_move_pages_to_node(struct list_head *pagelist, int node)
 {
 	int err;
 	struct migration_target_control mtc = {
 		.nid = node,
 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+		.reason = MR_SYSCALL,
 	};

 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
@@ -2042,88 +2234,71 @@ static int do_move_pages_to_node(struct mm_struct *mm,
 	return err;
 }

+static int __add_folio_for_migration(struct folio *folio, int node,
+		struct list_head *pagelist, bool migrate_all)
+{
+	if (is_zero_folio(folio) || is_huge_zero_folio(folio))
+		return -EFAULT;
+
+	if (folio_is_zone_device(folio))
+		return -ENOENT;
+
+	if (folio_nid(folio) == node)
+		return 0;
+
+	if (folio_maybe_mapped_shared(folio) && !migrate_all)
+		return -EACCES;
+
+	if (folio_test_hugetlb(folio)) {
+		if (folio_isolate_hugetlb(folio, pagelist))
+			return 1;
+	} else if (folio_isolate_lru(folio)) {
+		list_add_tail(&folio->lru, pagelist);
+		node_stat_mod_folio(folio,
+			NR_ISOLATED_ANON + folio_is_file_lru(folio),
+			folio_nr_pages(folio));
+		return 1;
+	}
+	return -EBUSY;
+}
+
 /*
- * Resolves the given address to a struct page, isolates it from the LRU and
+ * Resolves the given address to a struct folio, isolates it from the LRU and
  * puts it to the given pagelist.
  * Returns:
- *     errno	- if the page cannot be found/isolated
+ *     errno	- if the folio cannot be found/isolated
  *     0 - when it doesn't have to be migrated because it is already on the
  *         target node
  *     1 - when it has been queued
  */
-static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
+static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
 		int node, struct list_head *pagelist, bool migrate_all)
 {
 	struct vm_area_struct *vma;
+	struct folio_walk fw;
+	struct folio *folio;
 	unsigned long addr;
-	struct page *page;
-	int err;
-	bool isolated;
+	int err = -EFAULT;

 	mmap_read_lock(mm);
 	addr = (unsigned long)untagged_addr_remote(mm, p);

-	err = -EFAULT;
 	vma = vma_lookup(mm, addr);
-	if (!vma || !vma_migratable(vma))
-		goto out;
-
-	/* FOLL_DUMP to ignore special (like zero) pages */
-	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
-	err = PTR_ERR(page);
-	if (IS_ERR(page))
-		goto out;
-
-	err = -ENOENT;
-	if (!page)
-		goto out;
-
-	if (is_zone_device_page(page))
-		goto out_putpage;
-
-	err = 0;
-	if (page_to_nid(page) == node)
-		goto out_putpage;
-
-	err = -EACCES;
-	if (page_mapcount(page) > 1 && !migrate_all)
-		goto out_putpage;
-
-	if (PageHuge(page)) {
-		if (PageHead(page)) {
-			isolated = isolate_hugetlb(page_folio(page), pagelist);
-			err = isolated ? 1 : -EBUSY;
-		}
-	} else {
-		struct page *head;
-
-		head = compound_head(page);
-		isolated = isolate_lru_page(head);
-		if (!isolated) {
-			err = -EBUSY;
-			goto out_putpage;
+	if (vma && vma_migratable(vma)) {
+		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
+		if (folio) {
+			err = __add_folio_for_migration(folio, node, pagelist,
+							migrate_all);
+			folio_walk_end(&fw, vma);
+		} else {
+			err = -ENOENT;
 		}
-
-		err = 1;
-		list_add_tail(&head->lru, pagelist);
-		mod_node_page_state(page_pgdat(head),
-			NR_ISOLATED_ANON + page_is_file_lru(head),
-			thp_nr_pages(head));
 	}
-out_putpage:
-	/*
-	 * Either remove the duplicate refcount from
-	 * isolate_lru_page() or drop the page ref if it was
-	 * not isolated.
-	 */
-	put_page(page);
-out:
 	mmap_read_unlock(mm);
 	return err;
 }

-static int move_pages_and_store_status(struct mm_struct *mm, int node,
+static int move_pages_and_store_status(int node,
 		struct list_head *pagelist, int __user *status,
 		int start, int i, unsigned long nr_pages)
 {
@@ -2132,7 +2307,7 @@ static int move_pages_and_store_status(struct mm_struct *mm, int node,
 	if (list_empty(pagelist))
 		return 0;

-	err = do_move_pages_to_node(mm, pagelist, node);
+	err = do_move_pages_to_node(pagelist, node);
 	if (err) {
 		/*
 		 * Positive err means the number of failed
@@ -2159,6 +2334,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 			 const int __user *nodes,
 			 int __user *status, int flags)
 {
+	compat_uptr_t __user *compat_pages = (void __user *)pages;
 	int current_node = NUMA_NO_NODE;
 	LIST_HEAD(pagelist);
 	int start, i;
@@ -2171,8 +2347,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 		int node;

 		err = -EFAULT;
-		if (get_user(p, pages + i))
-			goto out_flush;
+		if (in_compat_syscall()) {
+			compat_uptr_t cp;
+
+			if (get_user(cp, compat_pages + i))
+				goto out_flush;
+
+			p = compat_ptr(cp);
+		} else {
+			if (get_user(p, pages + i))
+				goto out_flush;
+		}
 		if (get_user(node, nodes + i))
 			goto out_flush;

@@ -2190,7 +2375,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 			current_node = node;
 			start = i;
 		} else if (node != current_node) {
-			err = move_pages_and_store_status(mm, current_node,
+			err = move_pages_and_store_status(current_node,
 					&pagelist, status, start, i, nr_pages);
 			if (err)
 				goto out;
@@ -2202,8 +2387,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 		 * Errors in the page lookup or isolation are not fatal and we simply
 		 * report them via status
 		 */
-		err = add_page_for_migration(mm, p, current_node, &pagelist,
-				flags & MPOL_MF_MOVE_ALL);
+		err = add_folio_for_migration(mm, p, current_node, &pagelist,
+				flags & MPOL_MF_MOVE_ALL);

 		if (err > 0) {
 			/* The page is successfully queued for migration */
@@ -2211,13 +2396,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 		}

 		/*
-		 * The move_pages() man page does not have an -EEXIST choice, so
-		 * use -EFAULT instead.
-		 */
-		if (err == -EEXIST)
-			err = -EFAULT;
-
-		/*
 		 * If the page is already on the target node (!err), store the
 		 * node, otherwise, store the err.
 		 */
@@ -2225,7 +2403,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 		if (err)
 			goto out_flush;

-		err = move_pages_and_store_status(mm, current_node, &pagelist,
+		err = move_pages_and_store_status(current_node, &pagelist,
 				status, start, i, nr_pages);
 		if (err) {
 			/* We have accounted for page i */
@@ -2237,7 +2415,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 	}
 out_flush:
 	/* Make sure we do not overwrite the existing error */
-	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
+	err1 = move_pages_and_store_status(current_node, &pagelist,
 			status, start, i, nr_pages);
 	if (err >= 0)
 		err = err1;
@@ -2259,28 +2437,26 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 	for (i = 0; i < nr_pages; i++) {
 		unsigned long addr = (unsigned long)(*pages);
 		struct vm_area_struct *vma;
-		struct page *page;
+		struct folio_walk fw;
+		struct folio *folio;
 		int err = -EFAULT;

 		vma = vma_lookup(mm, addr);
 		if (!vma)
 			goto set_status;

-		/* FOLL_DUMP to ignore special (like zero) pages */
-		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
-		err = PTR_ERR(page);
-		if (IS_ERR(page))
-			goto set_status;
-
-		err = -ENOENT;
-		if (!page)
-			goto set_status;
-
-		if (!is_zone_device_page(page))
-			err = page_to_nid(page);
-
-		put_page(page);
+		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
+		if (folio) {
+			if (is_zero_folio(folio) || is_huge_zero_folio(folio))
+				err = -EFAULT;
+			else if (folio_is_zone_device(folio))
+				err = -ENOENT;
+			else
+				err = folio_nid(folio);
+			folio_walk_end(&fw, vma);
+		} else {
+			err = -ENOENT;
+		}
 set_status:
 		*status = err;

@@ -2293,6 +2469,7 @@ set_status:

 static int get_compat_pages_array(const void __user *chunk_pages[],
 				  const void __user * __user *pages,
+				  unsigned long chunk_offset,
 				  unsigned long chunk_nr)
 {
 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
@@ -2300,7 +2477,7 @@ static int get_compat_pages_array(const void __user *chunk_pages[],
 	int i;

 	for (i = 0; i < chunk_nr; i++) {
-		if (get_user(p, pages32 + i))
+		if (get_user(p, pages32 + chunk_offset + i))
 			return -EFAULT;
 		chunk_pages[i] = compat_ptr(p);
 	}
@@ -2319,27 +2496,28 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 #define DO_PAGES_STAT_CHUNK_NR 16UL
 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+	unsigned long chunk_offset = 0;

 	while (nr_pages) {
 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

 		if (in_compat_syscall()) {
 			if (get_compat_pages_array(chunk_pages, pages,
-						   chunk_nr))
+						   chunk_offset, chunk_nr))
 				break;
 		} else {
-			if (copy_from_user(chunk_pages, pages,
+			if (copy_from_user(chunk_pages, pages + chunk_offset,
 					   chunk_nr * sizeof(*chunk_pages)))
 				break;
 		}

 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

-		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+		if (copy_to_user(status + chunk_offset, chunk_status,
+				 chunk_nr * sizeof(*status)))
 			break;

-		pages += chunk_nr;
-		status += chunk_nr;
+		chunk_offset += chunk_nr;
 		nr_pages -= chunk_nr;
 	}
 	return nr_pages ? -EFAULT : 0;
@@ -2360,25 +2538,19 @@ static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
 		return current->mm;
 	}

-	/* Find the mm_struct */
-	rcu_read_lock();
-	task = find_task_by_vpid(pid);
+	task = find_get_task_by_vpid(pid);
 	if (!task) {
-		rcu_read_unlock();
 		return ERR_PTR(-ESRCH);
 	}
-	get_task_struct(task);

 	/*
 	 * Check if this process has the right to modify the specified
 	 * process. Use the regular "ptrace_may_access()" checks.
 	 */
 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
-		rcu_read_unlock();
 		mm = ERR_PTR(-EPERM);
 		goto out;
 	}
-	rcu_read_unlock();

 	mm = ERR_PTR(security_task_movememory(task));
 	if (IS_ERR(mm))
@@ -2454,7 +2626,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
 		if (!zone_watermark_ok(zone, 0,
 				       high_wmark_pages(zone) +
 				       nr_migrate_pages,
-				       ZONE_MOVABLE, 0))
+				       ZONE_MOVABLE, ALLOC_CMA))
 			continue;
 		return true;
 	}
@@ -2478,105 +2650,101 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
 	return __folio_alloc_node(gfp, order, nid);
 }

-static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+/*
+ * Prepare for calling migrate_misplaced_folio() by isolating the folio if
+ * permitted. Must be called with the PTL still held.
+ */
+int migrate_misplaced_folio_prepare(struct folio *folio,
+		struct vm_area_struct *vma, int node)
 {
-	int nr_pages = thp_nr_pages(page);
-	int order = compound_order(page);
+	int nr_pages = folio_nr_pages(folio);
+	pg_data_t *pgdat = NODE_DATA(node);

-	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
+	if (folio_is_file_lru(folio)) {
+		/*
+		 * Do not migrate file folios that are mapped in multiple
+		 * processes with execute permissions as they are probably
+		 * shared libraries.
+		 *
+		 * See folio_maybe_mapped_shared() on possible imprecision
+		 * when we cannot easily detect if a folio is shared.
+		 */
+		if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio))
+			return -EACCES;

-	/* Do not migrate THP mapped by multiple processes */
-	if (PageTransHuge(page) && total_mapcount(page) > 1)
-		return 0;
+		/*
+		 * Do not migrate dirty folios as not all filesystems can move
+		 * dirty folios in MIGRATE_ASYNC mode which is a waste of
+		 * cycles.
+		 */
+		if (folio_test_dirty(folio))
+			return -EAGAIN;
+	}

 	/* Avoid migrating to a node that is nearly full */
 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
 		int z;

 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
-			return 0;
+			return -EAGAIN;
 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
 			if (managed_zone(pgdat->node_zones + z))
 				break;
 		}
-		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
-		return 0;
+
+		/*
+		 * If there are no managed zones, it should not proceed
+		 * further.
+		 */
+		if (z < 0)
+			return -EAGAIN;
+
+		wakeup_kswapd(pgdat->node_zones + z, 0,
+			      folio_order(folio), ZONE_MOVABLE);
+		return -EAGAIN;
 	}

-	if (!isolate_lru_page(page))
-		return 0;
+	if (!folio_isolate_lru(folio))
+		return -EAGAIN;

-	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
+	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
 			    nr_pages);
-
-	/*
-	 * Isolating the page has taken another reference, so the
-	 * caller's reference can be safely dropped without the page
-	 * disappearing underneath us during migration.
-	 */
-	put_page(page);
-	return 1;
+	return 0;
 }

 /*
- * Attempt to migrate a misplaced page to the specified destination
- * node. Caller is expected to have an elevated reference count on
- * the page that will be dropped by this function before returning.
+ * Attempt to migrate a misplaced folio to the specified destination
+ * node. Caller is expected to have isolated the folio by calling
+ * migrate_misplaced_folio_prepare(), which will result in an
+ * elevated reference count on the folio. This function will un-isolate the
+ * folio, dereferencing the folio before returning.
  */
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-			   int node)
+int migrate_misplaced_folio(struct folio *folio, int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
-	int isolated;
 	int nr_remaining;
 	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
-	int nr_pages = thp_nr_pages(page);
+	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
+	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

-	/*
-	 * Don't migrate file pages that are mapped in multiple processes
-	 * with execute permissions as they are probably shared libraries.
-	 */
-	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
-	    (vma->vm_flags & VM_EXEC))
-		goto out;
-
-	/*
-	 * Also do not migrate dirty pages as not all filesystems can move
-	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
-	 */
-	if (page_is_file_lru(page) && PageDirty(page))
-		goto out;
-
-	isolated = numamigrate_isolate_page(pgdat, page);
-	if (!isolated)
-		goto out;
-
-	list_add(&page->lru, &migratepages);
+	list_add(&folio->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
 				     NULL, node, MIGRATE_ASYNC,
 				     MR_NUMA_MISPLACED, &nr_succeeded);
-	if (nr_remaining) {
-		if (!list_empty(&migratepages)) {
-			list_del(&page->lru);
-			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_lru(page), -nr_pages);
-			putback_lru_page(page);
-		}
-		isolated = 0;
-	}
+	if (nr_remaining && !list_empty(&migratepages))
+		putback_movable_pages(&migratepages);
 	if (nr_succeeded) {
 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
+		count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
-		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
-			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
-					    nr_succeeded);
+		if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
+		    && !node_is_toptier(folio_nid(folio))
+		    && node_is_toptier(node))
+			mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
 	}
+	mem_cgroup_put(memcg);
 	BUG_ON(!list_empty(&migratepages));
-	return isolated;
-
-out:
-	put_page(page);
-	return 0;
+	return nr_remaining ? -EAGAIN : 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */
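
The last hunks replace the old migrate_misplaced_page() with a two-step API: migrate_misplaced_folio_prepare() isolates the folio while the page-table lock is still held, and migrate_misplaced_folio() later performs the actual migration. The sketch below is illustrative only and is not part of this diff; the caller shown (a NUMA-hinting fault path), the helper name and the omitted reference counting and statistics are assumptions about how such a caller would look.

/*
 * Hypothetical caller sketch (not from this diff): drive the two-step
 * misplaced-folio migration API from a NUMA-hinting fault handler.
 * Refcounting and TNF_* statistics a real handler needs are omitted.
 */
#include <linux/mm.h>
#include <linux/migrate.h>

static bool try_numa_migrate(struct vm_fault *vmf, struct folio *folio,
			     int target_nid)
{
	/*
	 * Still holding the PTL: try to isolate the folio. A negative
	 * return (-EACCES, -EAGAIN) means the folio stays where it is
	 * and the caller keeps the PTL.
	 */
	if (migrate_misplaced_folio_prepare(folio, vmf->vma, target_nid))
		return false;

	/* Migration can sleep, so drop the PTL before moving the folio. */
	pte_unmap_unlock(vmf->pte, vmf->ptl);

	/* Returns 0 on success, -EAGAIN if the folio could not be moved. */
	return migrate_misplaced_folio(folio, target_nid) == 0;
}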

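Several of the earlier hunks rework the move_pages(2) backend (do_pages_move(), do_pages_stat(), and the compat pointer handling). The following userspace sketch is not part of the diff; it assumes libnuma's <numaif.h> wrapper (link with -lnuma). Passing nodes == NULL queries placement only, and each status slot receives either a node id or a negative errno, matching the per-page error reporting filled in by do_pages_move() and do_pages_stat_array().

/* Hypothetical userspace example (not from this diff): query and move one
 * page with move_pages(2). Build with: cc demo.c -lnuma */
#define _GNU_SOURCE
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf = aligned_alloc(page_size, page_size);
	void *pages[1] = { buf };
	int status[1] = { -1 };
	int nodes[1] = { 0 };

	/* Touch the page so it is actually allocated somewhere. */
	*(volatile char *)buf = 1;

	/* nodes == NULL: only report the node each page currently sits on. */
	if (move_pages(0 /* self */, 1, pages, NULL, status, 0) == 0)
		printf("page is on node %d\n", status[0]);

	/* Ask for migration to node 0; status gets the node or a -errno. */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
		printf("after move: status = %d\n", status[0]);

	free(buf);
	return 0;
}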