author     Bijan Tabatabai <bijantabatab@micron.com>    2025-07-08 19:59:43 -0500
committer  Andrew Morton <akpm@linux-foundation.org>    2025-07-19 18:59:51 -0700
commit     db87a4e236424249c7def768ba50b88699af2c0d (patch)
tree       c510f26fd03544435719a4575d54c2b31a7a0b0f
parent     0a707d6b04e01490f6c246fa4b6e643cc33b40a1 (diff)
mm/damon/vaddr: apply filters in migrate_{hot/cold}
The paddr versions of migrate_{hot/cold} filter out folios from migration
based on the scheme's filters. This patch does the same for the vaddr
versions of those schemes.
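
[The decision the new damos_va_filter_out() makes below is first-match-wins
over the scheme's installed filters. A minimal userspace sketch of that
decision logic; struct filter and filter_out() are illustrative stand-ins,
not kernel types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct damos_filter (not the kernel type). */
    struct filter {
            bool matching;  /* match folios that have the property, or lack it */
            bool allow;     /* on a match, let the folio migrate or filter it out */
    };

    /*
     * Model of the walk in damos_va_filter_out(): the first filter whose
     * condition matches decides; the scheme default applies when none match.
     */
    static bool filter_out(const struct filter *filters, const bool *has_property,
                    int nr_filters, bool default_reject)
    {
            for (int i = 0; i < nr_filters; i++) {
                    if (has_property[i] == filters[i].matching)
                            return !filters[i].allow;
            }
            return default_reject;
    }

    int main(void)
    {
            /* One filter: "allow folios that are young". */
            struct filter filters[] = { { .matching = true, .allow = true } };
            bool young_folio[] = { true };

            printf("filtered out: %s\n",
                            filter_out(filters, young_folio, 1, true) ? "yes" : "no");
            return 0;
    }
]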
The filtering code is mostly the same for the paddr and vaddr versions.
The exception is the young filter. paddr determines if a page is young by
doing a folio rmap walk to find the page table entries corresponding to
the folio. However, vaddr schemes have easier access to the page tables,
so we add some logic to avoid the extra work.
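
[A minimal sketch of that saving, with struct pte_state and young_match() as
hypothetical stand-ins rather than kernel API: the vaddr walker already holds
the PTE (or PMD) mapping the folio, so the young test reads that entry's
accessed bit directly, folds in the folio idle flag, and clears the bit
(mkold) so the next pass sees fresh state; the rmap walk the paddr path needs
to locate the mapping disappears entirely:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the accessed bit of a page table entry. */
    struct pte_state {
            bool accessed;
    };

    /*
     * Model of damos_va_filter_young_match(): read the accessed bit of the
     * entry the walker is already visiting (no rmap walk), fold in the folio
     * idle flag, clear the bit when young so access tracking re-arms, and
     * compare the result against what the filter is looking for.
     */
    static bool young_match(struct pte_state *pte, bool folio_idle, bool want_young)
    {
            bool young = pte->accessed || !folio_idle;

            if (young)
                    pte->accessed = false;  /* mkold: re-arm detection */

            return young == want_young;
    }

    int main(void)
    {
            struct pte_state pte = { .accessed = true };

            /* First check sees the access; after mkold, a second check does not. */
            printf("first:  %d\n", young_match(&pte, true, true));
            printf("second: %d\n", young_match(&pte, true, true));
            return 0;
    }
]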
Link: https://lkml.kernel.org/r/20250709005952.17776-14-bijan311@gmail.com
Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/damon/vaddr.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 65 insertions(+), 4 deletions(-)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 47d5f33f89c8..7f5dc9c221a0 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -611,9 +611,60 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
+static bool damos_va_filter_young_match(struct damos_filter *filter,
+		struct folio *folio, struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, pmd_t *pmdp)
+{
+	bool young = false;
+
+	if (ptep)
+		young = pte_young(ptep_get(ptep));
+	else if (pmdp)
+		young = pmd_young(pmdp_get(pmdp));
+
+	young = young || !folio_test_idle(folio) ||
+		mmu_notifier_test_young(vma->vm_mm, addr);
+
+	if (young && ptep)
+		damon_ptep_mkold(ptep, vma, addr);
+	else if (young && pmdp)
+		damon_pmdp_mkold(pmdp, vma, addr);
+
+	return young == filter->matching;
+}
+
+static bool damos_va_filter_out(struct damos *scheme, struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr,
+		pte_t *ptep, pmd_t *pmdp)
+{
+	struct damos_filter *filter;
+	bool matched;
+
+	if (scheme->core_filters_allowed)
+		return false;
+
+	damos_for_each_ops_filter(filter, scheme) {
+		/*
+		 * damos_folio_filter_match checks the young filter by doing an
+		 * rmap on the folio to find its page table. However, being the
+		 * vaddr scheme, we have direct access to the page tables, so
+		 * use that instead.
+		 */
+		if (filter->type == DAMOS_FILTER_TYPE_YOUNG)
+			matched = damos_va_filter_young_match(filter, folio,
+					vma, addr, ptep, pmdp);
+		else
+			matched = damos_folio_filter_match(filter, folio);
+
+		if (matched)
+			return !filter->allow;
+	}
+	return scheme->ops_filters_default_reject;
+}
+
 struct damos_va_migrate_private {
 	struct list_head *migration_lists;
-	struct damos_migrate_dests *dests;
+	struct damos *scheme;
 };
 
 /*
@@ -673,7 +724,8 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	struct damos_va_migrate_private *priv = walk->private;
 	struct list_head *migration_lists = priv->migration_lists;
-	struct damos_migrate_dests *dests = priv->dests;
+	struct damos *s = priv->scheme;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	spinlock_t *ptl;
 	pmd_t pmde;
@@ -691,9 +743,13 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 	if (!folio)
 		goto unlock;
 
+	if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
+		goto put_folio;
+
 	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
 				   migration_lists);
 
+put_folio:
 	folio_put(folio);
 unlock:
 	spin_unlock(ptl);
@@ -708,7 +764,8 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 {
 	struct damos_va_migrate_private *priv = walk->private;
 	struct list_head *migration_lists = priv->migration_lists;
-	struct damos_migrate_dests *dests = priv->dests;
+	struct damos *s = priv->scheme;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	pte_t ptent;
 
@@ -720,9 +777,13 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 	if (!folio)
 		return 0;
 
+	if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
+		goto put_folio;
+
 	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
 				   migration_lists);
 
+put_folio:
 	folio_put(folio);
 	return 0;
 }
@@ -790,7 +851,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 
 	use_target_nid = dests->nr_dests == 0;
 	nr_dests = use_target_nid ? 1 : dests->nr_dests;
-	priv.dests = dests;
+	priv.scheme = s;
 	priv.migration_lists = kmalloc_array(nr_dests,
 			sizeof(*priv.migration_lists), GFP_KERNEL);
 	if (!priv.migration_lists)