Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c | 388
1 file changed, 185 insertions(+), 203 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index f3ee6d8d5e2e..425401b2d4e1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -35,7 +35,6 @@
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>
-#include <linux/pfn_t.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
@@ -44,15 +43,57 @@
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/pagewalk.h>
+#include <linux/balloon_compaction.h>
+#include <linux/zsmalloc.h>
#include <asm/tlbflush.h>
#include <trace/events/migrate.h>
#include "internal.h"
+#include "swap.h"
-bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+static const struct movable_operations *page_movable_ops(struct page *page)
{
+ VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
+
+ /*
+ * If we enable page migration for a page of a certain type by marking
+ * it as movable, the page type must be sticky until the page gets freed
+ * back to the buddy.
+ */
+#ifdef CONFIG_BALLOON_COMPACTION
+ if (PageOffline(page))
+ /* Only balloon compaction sets PageOffline pages movable. */
+ return &balloon_mops;
+#endif /* CONFIG_BALLOON_COMPACTION */
+#if defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION)
+ if (PageZsmalloc(page))
+ return &zsmalloc_mops;
+#endif /* defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION) */
+ return NULL;
+}
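To illustrate the "sticky type" rule the comment above relies on, a hypothetical snippet (illustration only, using zsmalloc's page-type setter as the example; the allocation flags and surrounding flow are assumptions):

struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);

/* The type is applied once, before the page is exposed to migration. */
__SetPageZsmalloc(page);

/*
 * From now until the page is freed back to the buddy, the type must not
 * be cleared, so page_movable_ops() can trust it without the page lock.
 */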
+
+/**
+ * isolate_movable_ops_page - isolate a movable_ops page for migration
+ * @page: The page.
+ * @mode: The isolation mode.
+ *
+ * Try to isolate a movable_ops page for migration. Will fail if the page is
+ * not a movable_ops page, if the page is already isolated for migration
+ * or if the page was just released by its owner.
+ *
+ * Once isolated, the page cannot get freed until it is either put back
+ * or migrated.
+ *
+ * Returns true if isolation succeeded, otherwise false.
+ */
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
+{
+ /*
+ * TODO: these pages will not be folios in the future. All
+ * folio dependencies will have to be removed.
+ */
struct folio *folio = folio_get_nontail_page(page);
const struct movable_operations *mops;
@@ -69,11 +110,14 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
goto out;
/*
- * Check movable flag before taking the page lock because
+ * Check for movable_ops pages before taking the page lock because
* we use non-atomic bitops on newly allocated page flags so
* unconditionally grabbing the lock ruins page's owner side.
+ *
+ * Note that once a page has movable_ops, it will stay that way
+ * until the page is freed.
*/
- if (unlikely(!__folio_test_movable(folio)))
+ if (unlikely(!page_has_movable_ops(page)))
goto out_putfolio;
/*
@@ -90,18 +134,20 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
if (unlikely(!folio_trylock(folio)))
goto out_putfolio;
- if (!folio_test_movable(folio) || folio_test_isolated(folio))
+ VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
+ if (PageMovableOpsIsolated(page))
goto out_no_isolated;
- mops = folio_movable_ops(folio);
- VM_BUG_ON_FOLIO(!mops, folio);
+ mops = page_movable_ops(page);
+ if (WARN_ON_ONCE(!mops))
+ goto out_no_isolated;
- if (!mops->isolate_page(&folio->page, mode))
+ if (!mops->isolate_page(page, mode))
goto out_no_isolated;
/* Driver shouldn't use the isolated flag */
- WARN_ON_ONCE(folio_test_isolated(folio));
- folio_set_isolated(folio);
+ VM_WARN_ON_ONCE_PAGE(PageMovableOpsIsolated(page), page);
+ SetPageMovableOpsIsolated(page);
folio_unlock(folio);
return true;
@@ -114,12 +160,69 @@ out:
return false;
}
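For context, a hedged caller-side sketch (not part of this patch; `mtc` stands in for an assumed struct migration_target_control and error handling is trimmed): isolation pairs either with a successful migration or with putback_movable_pages():

LIST_HEAD(pagelist);

if (isolate_movable_ops_page(page, ISOLATE_UNEVICTABLE)) {
	list_add(&page_folio(page)->lru, &pagelist);
	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
			  (unsigned long)&mtc, MIGRATE_SYNC,
			  MR_CONTIG_RANGE, NULL))
		putback_movable_pages(&pagelist);
}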
-static void putback_movable_folio(struct folio *folio)
+/**
+ * putback_movable_ops_page - putback an isolated movable_ops page
+ * @page: The isolated page.
+ *
+ * Putback an isolated movable_ops page.
+ *
+ * After the page was put back, it might get freed instantly.
+ */
+static void putback_movable_ops_page(struct page *page)
{
- const struct movable_operations *mops = folio_movable_ops(folio);
+ /*
+ * TODO: these pages will not be folios in the future. All
+ * folio dependencies will have to be removed.
+ */
+ struct folio *folio = page_folio(page);
- mops->putback_page(&folio->page);
- folio_clear_isolated(folio);
+ VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
+ VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(page), page);
+ folio_lock(folio);
+ page_movable_ops(page)->putback_page(page);
+ ClearPageMovableOpsIsolated(page);
+ folio_unlock(folio);
+ folio_put(folio);
+}
+
+/**
+ * migrate_movable_ops_page - migrate an isolated movable_ops page
+ * @dst: The destination page.
+ * @src: The source page.
+ * @mode: The migration mode.
+ *
+ * Migrate an isolated movable_ops page.
+ *
+ * If the src page was already released by its owner, the src page is
+ * un-isolated (put back) and migration succeeds; the migration core will be the
+ * owner of both pages.
+ *
+ * If the src page was not released by its owner and the migration was
+ * successful, the owner of the src page and the dst page are swapped and
+ * the src page is un-isolated.
+ *
+ * If migration fails, the ownership stays unmodified and the src page
+ * remains isolated: migration may be retried later or the page can be put back.
+ *
+ * TODO: migration core will treat both pages as folios, locking them before
+ * this call and unlocking them afterwards. Further, the folio refcounts on
+ * src and dst are also released by migration core. These pages will not be
+ * folios in the future, so that must be reworked.
+ *
+ * Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
+ * code.
+ */
+static int migrate_movable_ops_page(struct page *dst, struct page *src,
+ enum migrate_mode mode)
+{
+ int rc = MIGRATEPAGE_SUCCESS;
+
+ VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
+ VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
+ rc = page_movable_ops(src)->migrate_page(dst, src, mode);
+ if (rc == MIGRATEPAGE_SUCCESS)
+ ClearPageMovableOpsIsolated(src);
+ return rc;
}
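To make the documented contract concrete, a minimal hypothetical backend (the demo_* names are invented for illustration; the three callbacks are the actual struct movable_operations hooks):

static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Pin the backing object so it cannot vanish while isolated. */
	return true;
}

static int demo_migrate_page(struct page *dst, struct page *src,
		enum migrate_mode mode)
{
	/* Copy contents and transfer ownership from src to dst. */
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback_page(struct page *page)
{
	/* Drop the pin taken in demo_isolate_page(). */
}

static const struct movable_operations demo_mops = {
	.isolate_page	= demo_isolate_page,
	.migrate_page	= demo_migrate_page,
	.putback_page	= demo_putback_page,
};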
/*
@@ -141,20 +244,8 @@ void putback_movable_pages(struct list_head *l)
continue;
}
list_del(&folio->lru);
- /*
- * We isolated non-lru movable folio so here we can use
- * __folio_test_movable because LRU folio's mapping cannot
- * have PAGE_MAPPING_MOVABLE.
- */
- if (unlikely(__folio_test_movable(folio))) {
- VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
- folio_lock(folio);
- if (folio_test_movable(folio))
- putback_movable_folio(folio);
- else
- folio_clear_isolated(folio);
- folio_unlock(folio);
- folio_put(folio);
+ if (unlikely(page_has_movable_ops(&folio->page))) {
+ putback_movable_ops_page(&folio->page);
} else {
node_stat_mod_folio(folio, NR_ISOLATED_ANON +
folio_is_file_lru(folio), -folio_nr_pages(folio));
@@ -166,26 +257,20 @@ void putback_movable_pages(struct list_head *l)
/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
- bool isolated, lru;
-
if (folio_test_hugetlb(folio))
return folio_isolate_hugetlb(folio, list);
- lru = !__folio_test_movable(folio);
- if (lru)
- isolated = folio_isolate_lru(folio);
- else
- isolated = isolate_movable_page(&folio->page,
- ISOLATE_UNEVICTABLE);
-
- if (!isolated)
- return false;
-
- list_add(&folio->lru, list);
- if (lru)
+ if (page_has_movable_ops(&folio->page)) {
+ if (!isolate_movable_ops_page(&folio->page,
+ ISOLATE_UNEVICTABLE))
+ return false;
+ } else {
+ if (!folio_isolate_lru(folio))
+ return false;
node_stat_add_folio(folio, NR_ISOLATED_ANON +
folio_is_file_lru(folio));
-
+ }
+ list_add(&folio->lru, list);
return true;
}
@@ -445,20 +530,6 @@ unlock:
}
#endif
-static int folio_expected_refs(struct address_space *mapping,
- struct folio *folio)
-{
- int refs = 1;
- if (!mapping)
- return refs;
-
- refs += folio_nr_pages(folio);
- if (folio_test_private(folio))
- refs++;
-
- return refs;
-}
-
/*
* Replace the folio in the mapping.
*
@@ -601,7 +672,7 @@ static int __folio_migrate_mapping(struct address_space *mapping,
int folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int extra_count)
{
- int expected_count = folio_expected_refs(mapping, folio) + extra_count;
+ int expected_count = folio_expected_ref_count(folio) + extra_count + 1;
if (folio_ref_count(folio) != expected_count)
return -EAGAIN;
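The conversion keeps the arithmetic of the removed helper; a worked comparison (my reading, for the unmapped pagecache folio these call sites operate on):

/*
 * old: folio_expected_refs(mapping, folio)
 *        = 1 (caller) + folio_nr_pages(folio) + (folio_test_private() ? 1 : 0)
 * new: folio_expected_ref_count(folio) + 1
 *        = folio_nr_pages(folio) + (folio_test_private() ? 1 : 0)
 *          + folio_mapcount(folio) + 1 (caller)
 *
 * The mapcount term is zero at these call sites because the folio was
 * already unmapped; the generic helper simply no longer bakes in the
 * caller's own reference, hence the explicit "+ 1".
 */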
@@ -618,7 +689,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
{
XA_STATE(xas, &mapping->i_pages, folio_index(src));
- int rc, expected_count = folio_expected_refs(mapping, src);
+ int rc, expected_count = folio_expected_ref_count(src) + 1;
if (folio_ref_count(src) != expected_count)
return -EAGAIN;
@@ -749,7 +820,7 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, void *src_private,
enum migrate_mode mode)
{
- int rc, expected_count = folio_expected_refs(mapping, src);
+ int rc, expected_count = folio_expected_ref_count(src) + 1;
/* Check whether src does not have extra refs before we do more work */
if (folio_ref_count(src) != expected_count)
@@ -837,7 +908,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
return migrate_folio(mapping, dst, src, mode);
/* Check whether page does not have extra refs before we do more work */
- expected_count = folio_expected_refs(mapping, src);
+ expected_count = folio_expected_ref_count(src) + 1;
if (folio_ref_count(src) != expected_count)
return -EAGAIN;
@@ -845,9 +916,11 @@ static int __buffer_migrate_folio(struct address_space *mapping,
return -EAGAIN;
if (check_refs) {
- bool busy;
+ bool busy, migrating;
bool invalidated = false;
+ migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state);
+ VM_WARN_ON_ONCE(migrating);
recheck_buffers:
busy = false;
spin_lock(&mapping->i_private_lock);
@@ -859,12 +932,12 @@ recheck_buffers:
}
bh = bh->b_this_page;
} while (bh != head);
+ spin_unlock(&mapping->i_private_lock);
if (busy) {
if (invalidated) {
rc = -EAGAIN;
goto unlock_buffers;
}
- spin_unlock(&mapping->i_private_lock);
invalidate_bh_lrus();
invalidated = true;
goto recheck_buffers;
@@ -883,7 +956,7 @@ recheck_buffers:
unlock_buffers:
if (check_refs)
- spin_unlock(&mapping->i_private_lock);
+ clear_bit_unlock(BH_Migrate, &head->b_state);
bh = head;
do {
unlock_buffer(bh);
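Taken together (my summary of the BH_Migrate hunks above), the new bit carries the exclusion that previously required holding i_private_lock across the whole recheck, which is what allows the spin_unlock() to move before invalidate_bh_lrus():

/*
 * test_and_set_bit_lock(BH_Migrate, &head->b_state)
 *       recheck buffer refcounts under i_private_lock (dropped each pass)
 *       invalidate_bh_lrus() if needed   -- may sleep, no spinlock held
 * clear_bit_unlock(BH_Migrate, &head->b_state)
 *
 * Presumably buffer lookups test the bit so they do not take new
 * references while migration is in flight.
 */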
@@ -945,66 +1018,20 @@ int filemap_migrate_folio(struct address_space *mapping,
EXPORT_SYMBOL_GPL(filemap_migrate_folio);
/*
- * Writeback a folio to clean the dirty state
- */
-static int writeout(struct address_space *mapping, struct folio *folio)
-{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = 1,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .for_reclaim = 1
- };
- int rc;
-
- if (!mapping->a_ops->writepage)
- /* No write method for the address space */
- return -EINVAL;
-
- if (!folio_clear_dirty_for_io(folio))
- /* Someone else already triggered a write */
- return -EAGAIN;
-
- /*
- * A dirty folio may imply that the underlying filesystem has
- * the folio on some queue. So the folio must be clean for
- * migration. Writeout may mean we lose the lock and the
- * folio state is no longer what we checked for earlier.
- * At this point we know that the migration attempt cannot
- * be successful.
- */
- remove_migration_ptes(folio, folio, 0);
-
- rc = mapping->a_ops->writepage(&folio->page, &wbc);
-
- if (rc != AOP_WRITEPAGE_ACTIVATE)
- /* unlocked. Relock */
- folio_lock(folio);
-
- return (rc < 0) ? -EIO : -EAGAIN;
-}
-
-/*
* Default handling if a filesystem does not provide a migration function.
*/
static int fallback_migrate_folio(struct address_space *mapping,
struct folio *dst, struct folio *src, enum migrate_mode mode)
{
- if (folio_test_dirty(src)) {
- /* Only writeback folios in full synchronous migration */
- switch (mode) {
- case MIGRATE_SYNC:
- break;
- default:
- return -EBUSY;
- }
- return writeout(mapping, src);
- }
+ WARN_ONCE(mapping->a_ops->writepages,
+ "%ps does not implement migrate_folio\n",
+ mapping->a_ops);
+ if (folio_test_dirty(src))
+ return -EBUSY;
/*
- * Buffers may be managed in a filesystem specific way.
- * We must have no buffers or drop them.
+ * Filesystem may have private data at folio->private that we
+ * can't migrate automatically.
*/
if (!filemap_release_folio(src, GFP_KERNEL))
return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
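With writeout() removed, a filesystem that writes back data but still lacks a migration callback now trips the WARN_ONCE above; a hedged sketch of the expected remedy (hypothetical demo_fs; filemap_migrate_folio() is the stock helper exported elsewhere in this file):

static int demo_fs_writepages(struct address_space *mapping,
			      struct writeback_control *wbc);

static const struct address_space_operations demo_fs_aops = {
	.writepages	= demo_fs_writepages,
	.migrate_folio	= filemap_migrate_folio,	/* silences the WARN_ONCE */
};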
@@ -1013,11 +1040,12 @@ static int fallback_migrate_folio(struct address_space *mapping,
}
/*
- * Move a page to a newly allocated page
- * The page is locked and all ptes have been successfully removed.
+ * Move a src folio to a newly allocated dst folio.
+ *
+ * The src and dst folios are locked and the src folio was unmapped from
+ * the page tables.
*
- * The new page will have replaced the old page if this function
- * is successful.
+ * On success, the src folio has been replaced by the dst folio.
*
* Return value:
* < 0 - error code
@@ -1026,78 +1054,40 @@ static int fallback_migrate_folio(struct address_space *mapping,
static int move_to_new_folio(struct folio *dst, struct folio *src,
enum migrate_mode mode)
{
+ struct address_space *mapping = folio_mapping(src);
int rc = -EAGAIN;
- bool is_lru = !__folio_test_movable(src);
VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
- if (likely(is_lru)) {
- struct address_space *mapping = folio_mapping(src);
-
- if (!mapping)
- rc = migrate_folio(mapping, dst, src, mode);
- else if (mapping_inaccessible(mapping))
- rc = -EOPNOTSUPP;
- else if (mapping->a_ops->migrate_folio)
- /*
- * Most folios have a mapping and most filesystems
- * provide a migrate_folio callback. Anonymous folios
- * are part of swap space which also has its own
- * migrate_folio callback. This is the most common path
- * for page migration.
- */
- rc = mapping->a_ops->migrate_folio(mapping, dst, src,
- mode);
- else
- rc = fallback_migrate_folio(mapping, dst, src, mode);
- } else {
- const struct movable_operations *mops;
-
+ if (!mapping)
+ rc = migrate_folio(mapping, dst, src, mode);
+ else if (mapping_inaccessible(mapping))
+ rc = -EOPNOTSUPP;
+ else if (mapping->a_ops->migrate_folio)
/*
- * In case of non-lru page, it could be released after
- * isolation step. In that case, we shouldn't try migration.
+ * Most folios have a mapping and most filesystems
+ * provide a migrate_folio callback. Anonymous folios
+ * are part of swap space which also has its own
+ * migrate_folio callback. This is the most common path
+ * for page migration.
*/
- VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
- if (!folio_test_movable(src)) {
- rc = MIGRATEPAGE_SUCCESS;
- folio_clear_isolated(src);
- goto out;
- }
-
- mops = folio_movable_ops(src);
- rc = mops->migrate_page(&dst->page, &src->page, mode);
- WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
- !folio_test_isolated(src));
- }
+ rc = mapping->a_ops->migrate_folio(mapping, dst, src,
+ mode);
+ else
+ rc = fallback_migrate_folio(mapping, dst, src, mode);
- /*
- * When successful, old pagecache src->mapping must be cleared before
- * src is freed; but stats require that PageAnon be left as PageAnon.
- */
if (rc == MIGRATEPAGE_SUCCESS) {
- if (__folio_test_movable(src)) {
- VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
-
- /*
- * We clear PG_movable under page_lock so any compactor
- * cannot try to migrate this page.
- */
- folio_clear_isolated(src);
- }
-
/*
- * Anonymous and movable src->mapping will be cleared by
- * free_pages_prepare so don't reset it here for keeping
- * the type to work PageAnon, for example.
+ * For pagecache folios, src->mapping must be cleared before src
+ * is freed. Anonymous folios must stay anonymous until freed.
*/
- if (!folio_mapping_flags(src))
+ if (!folio_test_anon(src))
src->mapping = NULL;
if (likely(!folio_is_zone_device(dst)))
flush_dcache_folio(dst);
}
-out:
return rc;
}
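Net effect (my summary, not patch text): with movable_ops pages diverted earlier in migrate_folio_move(), move_to_new_folio() only ever sees LRU folios and reduces to a flat dispatch:

/*
 * !mapping                 -> migrate_folio()          (e.g. anon, no swapcache)
 * mapping_inaccessible()   -> -EOPNOTSUPP
 * a_ops->migrate_folio     -> filesystem/swap callback (common case)
 * otherwise                -> fallback_migrate_folio()
 */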
@@ -1164,12 +1154,7 @@ static void migrate_folio_undo_dst(struct folio *dst, bool locked,
static void migrate_folio_done(struct folio *src,
enum migrate_reason reason)
{
- /*
- * Compaction can migrate also non-LRU pages which are
- * not accounted to NR_ISOLATED_*. They can be recognized
- * as __folio_test_movable
- */
- if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
+ if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION)
mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
folio_is_file_lru(src), -folio_nr_pages(src));
@@ -1188,7 +1173,6 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
int rc = -EAGAIN;
int old_page_state = 0;
struct anon_vma *anon_vma = NULL;
- bool is_lru = data_race(!__folio_test_movable(src));
bool locked = false;
bool dst_locked = false;
@@ -1289,7 +1273,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
goto out;
dst_locked = true;
- if (unlikely(!is_lru)) {
+ if (unlikely(page_has_movable_ops(&src->page))) {
__migrate_folio_record(dst, old_page_state, anon_vma);
return MIGRATEPAGE_UNMAP;
}
@@ -1348,20 +1332,23 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
int rc;
int old_page_state = 0;
struct anon_vma *anon_vma = NULL;
- bool is_lru = !__folio_test_movable(src);
struct list_head *prev;
__migrate_folio_extract(dst, &old_page_state, &anon_vma);
prev = dst->lru.prev;
list_del(&dst->lru);
+ if (unlikely(page_has_movable_ops(&src->page))) {
+ rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
+ if (rc)
+ goto out;
+ goto out_unlock_both;
+ }
+
rc = move_to_new_folio(dst, src, mode);
if (rc)
goto out;
- if (unlikely(!is_lru))
- goto out_unlock_both;
-
/*
* When successful, push dst to LRU immediately: so that if it
* turns out to be an mlocked page, remove_migration_ptes() will
@@ -1380,7 +1367,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
out_unlock_both:
folio_unlock(dst);
- set_page_owner_migrate_reason(&dst->page, reason);
+ folio_set_owner_migrate_reason(dst, reason);
/*
* If migration is successful, decrease refcount of dst,
* which will not free the page because new page owner increased
@@ -2376,13 +2363,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
}
/*
- * The move_pages() man page does not have an -EEXIST choice, so
- * use -EFAULT instead.
- */
- if (err == -EEXIST)
- err = -EFAULT;
-
- /*
* If the page is already on the target node (!err), store the
* node, otherwise, store the err.
*/
@@ -2456,6 +2436,7 @@ set_status:
static int get_compat_pages_array(const void __user *chunk_pages[],
const void __user * __user *pages,
+ unsigned long chunk_offset,
unsigned long chunk_nr)
{
compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
@@ -2463,7 +2444,7 @@ static int get_compat_pages_array(const void __user *chunk_pages[],
int i;
for (i = 0; i < chunk_nr; i++) {
- if (get_user(p, pages32 + i))
+ if (get_user(p, pages32 + chunk_offset + i))
return -EFAULT;
chunk_pages[i] = compat_ptr(p);
}
@@ -2482,27 +2463,28 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
#define DO_PAGES_STAT_CHUNK_NR 16UL
const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+ unsigned long chunk_offset = 0;
while (nr_pages) {
unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
if (in_compat_syscall()) {
if (get_compat_pages_array(chunk_pages, pages,
- chunk_nr))
+ chunk_offset, chunk_nr))
break;
} else {
- if (copy_from_user(chunk_pages, pages,
+ if (copy_from_user(chunk_pages, pages + chunk_offset,
chunk_nr * sizeof(*chunk_pages)))
break;
}
do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
- if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+ if (copy_to_user(status + chunk_offset, chunk_status,
+ chunk_nr * sizeof(*status)))
break;
- pages += chunk_nr;
- status += chunk_nr;
+ chunk_offset += chunk_nr;
nr_pages -= chunk_nr;
}
return nr_pages ? -EFAULT : 0;
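A worked example of the offset fix (my illustration): with nr_pages = 40 and DO_PAGES_STAT_CHUNK_NR = 16, the loop runs with (chunk_offset, chunk_nr) = (0, 16), (16, 16), (32, 8). A shared element offset matters because the user arrays have different strides:

/*
 * pages + chunk_offset          advances by sizeof(void __user *)  (native)
 * pages32 + chunk_offset + i    advances by sizeof(compat_uptr_t)  (compat)
 * status + chunk_offset         advances by sizeof(int)            (both)
 *
 * The old code advanced the "pages" pointer itself, which used the
 * native stride even for compat tasks and so walked past the wrong
 * entries after the first chunk.
 */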