Diffstat (limited to 'mm/damon/paddr.c')
-rw-r--r-- | mm/damon/paddr.c | 277 |
1 files changed, 2 insertions, 275 deletions
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 4102a8c5f992..53a55c5114fb 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -13,51 +13,11 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/memory-tiers.h>
-#include <linux/migrate.h>
 #include <linux/mm_inline.h>
 
 #include "../internal.h"
 #include "ops-common.h"
 
-static bool damon_folio_mkold_one(struct folio *folio,
-		struct vm_area_struct *vma, unsigned long addr, void *arg)
-{
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
-
-	while (page_vma_mapped_walk(&pvmw)) {
-		addr = pvmw.address;
-		if (pvmw.pte)
-			damon_ptep_mkold(pvmw.pte, vma, addr);
-		else
-			damon_pmdp_mkold(pvmw.pmd, vma, addr);
-	}
-	return true;
-}
-
-static void damon_folio_mkold(struct folio *folio)
-{
-	struct rmap_walk_control rwc = {
-		.rmap_one = damon_folio_mkold_one,
-		.anon_lock = folio_lock_anon_vma_read,
-	};
-	bool need_lock;
-
-	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
-		folio_set_idle(folio);
-		return;
-	}
-
-	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-	if (need_lock && !folio_trylock(folio))
-		return;
-
-	rmap_walk(folio, &rwc);
-
-	if (need_lock)
-		folio_unlock(folio);
-
-}
-
 static void damon_pa_mkold(unsigned long paddr)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -87,75 +47,6 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 	}
 }
 
-static bool damon_folio_young_one(struct folio *folio,
-		struct vm_area_struct *vma, unsigned long addr, void *arg)
-{
-	bool *accessed = arg;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
-	pte_t pte;
-
-	*accessed = false;
-	while (page_vma_mapped_walk(&pvmw)) {
-		addr = pvmw.address;
-		if (pvmw.pte) {
-			pte = ptep_get(pvmw.pte);
-
-			/*
-			 * PFN swap PTEs, such as device-exclusive ones, that
-			 * actually map pages are "old" from a CPU perspective.
-			 * The MMU notifier takes care of any device aspects.
-			 */
-			*accessed = (pte_present(pte) && pte_young(pte)) ||
-				!folio_test_idle(folio) ||
-				mmu_notifier_test_young(vma->vm_mm, addr);
-		} else {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
-				!folio_test_idle(folio) ||
-				mmu_notifier_test_young(vma->vm_mm, addr);
-#else
-			WARN_ON_ONCE(1);
-#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
-		}
-		if (*accessed) {
-			page_vma_mapped_walk_done(&pvmw);
-			break;
-		}
-	}
-
-	/* If accessed, stop walking */
-	return *accessed == false;
-}
-
-static bool damon_folio_young(struct folio *folio)
-{
-	bool accessed = false;
-	struct rmap_walk_control rwc = {
-		.arg = &accessed,
-		.rmap_one = damon_folio_young_one,
-		.anon_lock = folio_lock_anon_vma_read,
-	};
-	bool need_lock;
-
-	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
-		if (folio_test_idle(folio))
-			return false;
-		else
-			return true;
-	}
-
-	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-	if (need_lock && !folio_trylock(folio))
-		return false;
-
-	rmap_walk(folio, &rwc);
-
-	if (need_lock)
-		folio_unlock(folio);
-
-	return accessed;
-}
-
 static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -206,49 +97,6 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
-static bool damos_pa_filter_match(struct damos_filter *filter,
-		struct folio *folio)
-{
-	bool matched = false;
-	struct mem_cgroup *memcg;
-	size_t folio_sz;
-
-	switch (filter->type) {
-	case DAMOS_FILTER_TYPE_ANON:
-		matched = folio_test_anon(folio);
-		break;
-	case DAMOS_FILTER_TYPE_ACTIVE:
-		matched = folio_test_active(folio);
-		break;
-	case DAMOS_FILTER_TYPE_MEMCG:
-		rcu_read_lock();
-		memcg = folio_memcg_check(folio);
-		if (!memcg)
-			matched = false;
-		else
-			matched = filter->memcg_id == mem_cgroup_id(memcg);
-		rcu_read_unlock();
-		break;
-	case DAMOS_FILTER_TYPE_YOUNG:
-		matched = damon_folio_young(folio);
-		if (matched)
-			damon_folio_mkold(folio);
-		break;
-	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
-		folio_sz = folio_size(folio);
-		matched = filter->sz_range.min <= folio_sz &&
-			  folio_sz <= filter->sz_range.max;
-		break;
-	case DAMOS_FILTER_TYPE_UNMAPPED:
-		matched = !folio_mapped(folio) || !folio_raw_mapping(folio);
-		break;
-	default:
-		break;
-	}
-
-	return matched == filter->matching;
-}
-
 /*
  * damos_pa_filter_out - Return true if the page should be filtered out.
  */
@@ -260,7 +108,7 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
 		return false;
 
 	damos_for_each_ops_filter(filter, scheme) {
-		if (damos_pa_filter_match(filter, folio))
+		if (damos_folio_filter_match(filter, folio))
 			return !filter->allow;
 	}
 	return scheme->ops_filters_default_reject;
@@ -381,127 +229,6 @@ static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
 			sz_filter_passed);
 }
 
-static unsigned int __damon_pa_migrate_folio_list(
-		struct list_head *migrate_folios, struct pglist_data *pgdat,
-		int target_nid)
-{
-	unsigned int nr_succeeded = 0;
-	nodemask_t allowed_mask = NODE_MASK_NONE;
-	struct migration_target_control mtc = {
-		/*
-		 * Allocate from 'node', or fail quickly and quietly.
-		 * When this happens, 'page' will likely just be discarded
-		 * instead of migrated.
-		 */
-		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
-			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
-		.nid = target_nid,
-		.nmask = &allowed_mask
-	};
-
-	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
-		return 0;
-
-	if (list_empty(migrate_folios))
-		return 0;
-
-	/* Migration ignores all cpuset and mempolicy settings */
-	migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
-		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
-		      &nr_succeeded);
-
-	return nr_succeeded;
-}
-
-static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
-						struct pglist_data *pgdat,
-						int target_nid)
-{
-	unsigned int nr_migrated = 0;
-	struct folio *folio;
-	LIST_HEAD(ret_folios);
-	LIST_HEAD(migrate_folios);
-
-	while (!list_empty(folio_list)) {
-		struct folio *folio;
-
-		cond_resched();
-
-		folio = lru_to_folio(folio_list);
-		list_del(&folio->lru);
-
-		if (!folio_trylock(folio))
-			goto keep;
-
-		/* Relocate its contents to another node. */
-		list_add(&folio->lru, &migrate_folios);
-		folio_unlock(folio);
-		continue;
-keep:
-		list_add(&folio->lru, &ret_folios);
-	}
-	/* 'folio_list' is always empty here */
-
-	/* Migrate folios selected for migration */
-	nr_migrated += __damon_pa_migrate_folio_list(
-			&migrate_folios, pgdat, target_nid);
-	/*
-	 * Folios that could not be migrated are still in @migrate_folios.
-	 * Add those back on @folio_list
-	 */
-	if (!list_empty(&migrate_folios))
-		list_splice_init(&migrate_folios, folio_list);
-
-	try_to_unmap_flush();
-
-	list_splice(&ret_folios, folio_list);
-
-	while (!list_empty(folio_list)) {
-		folio = lru_to_folio(folio_list);
-		list_del(&folio->lru);
-		folio_putback_lru(folio);
-	}
-
-	return nr_migrated;
-}
-
-static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
-					    int target_nid)
-{
-	int nid;
-	unsigned long nr_migrated = 0;
-	LIST_HEAD(node_folio_list);
-	unsigned int noreclaim_flag;
-
-	if (list_empty(folio_list))
-		return nr_migrated;
-
-	noreclaim_flag = memalloc_noreclaim_save();
-
-	nid = folio_nid(lru_to_folio(folio_list));
-	do {
-		struct folio *folio = lru_to_folio(folio_list);
-
-		if (nid == folio_nid(folio)) {
-			list_move(&folio->lru, &node_folio_list);
-			continue;
-		}
-
-		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
-							   NODE_DATA(nid),
-							   target_nid);
-		nid = folio_nid(lru_to_folio(folio_list));
-	} while (!list_empty(folio_list));
-
-	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
-						   NODE_DATA(nid),
-						   target_nid);
-
-	memalloc_noreclaim_restore(noreclaim_flag);
-
-	return nr_migrated;
-}
-
 static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
@@ -529,7 +256,7 @@ put_folio:
 		addr += folio_size(folio);
 		folio_put(folio);
 	}
-	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
+	applied = damon_migrate_pages(&folio_list, s->target_nid);
 	cond_resched();
 	s->last_applied = folio;
 	return applied * PAGE_SIZE;
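The two call-site changes above assume the removed helpers now live in DAMON's ops-common layer, which paddr.c already includes via "ops-common.h". Below is a minimal sketch of the declarations the updated code presumably relies on; the signatures are inferred from the removed paddr-local definitions and the new call sites, not copied from the actual header, which is outside this diff:

/* Hypothetical sketch of the relevant ops-common.h declarations. */
struct damos_filter;
struct folio;
struct list_head;

/*
 * Folio-based generalization of the removed damos_pa_filter_match():
 * returns whether @folio matches @filter, honoring filter->matching.
 */
bool damos_folio_filter_match(struct damos_filter *filter,
		struct folio *folio);

/*
 * Generalization of the removed damon_pa_migrate_pages(): migrate the
 * folios on @folio_list to @target_nid, returning the number of
 * successfully migrated folios.
 */
unsigned long damon_migrate_pages(struct list_head *folio_list,
		int target_nid);

Since both helpers operate on folios rather than physical addresses, hoisting them out of paddr.c lets other DAMON operation sets reuse the same filter-matching and migration paths, while paddr.c keeps only its thin physical-address wrappers; that is what the 2-insertion/275-deletion diffstat reflects.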