 mm/damon/vaddr.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 65 insertions(+), 4 deletions(-)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 47d5f33f89c8..7f5dc9c221a0 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -611,9 +611,60 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
+static bool damos_va_filter_young_match(struct damos_filter *filter,
+		struct folio *folio, struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, pmd_t *pmdp)
+{
+	bool young = false;
+
+	if (ptep)
+		young = pte_young(ptep_get(ptep));
+	else if (pmdp)
+		young = pmd_young(pmdp_get(pmdp));
+
+	young = young || !folio_test_idle(folio) ||
+		mmu_notifier_test_young(vma->vm_mm, addr);
+
+	if (young && ptep)
+		damon_ptep_mkold(ptep, vma, addr);
+	else if (young && pmdp)
+		damon_pmdp_mkold(pmdp, vma, addr);
+
+	return young == filter->matching;
+}
+
+static bool damos_va_filter_out(struct damos *scheme, struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr,
+		pte_t *ptep, pmd_t *pmdp)
+{
+	struct damos_filter *filter;
+	bool matched;
+
+	if (scheme->core_filters_allowed)
+		return false;
+
+	damos_for_each_ops_filter(filter, scheme) {
+		/*
+		 * damos_folio_filter_match checks the young filter by doing an
+		 * rmap on the folio to find its page table. However, being the
+		 * vaddr scheme, we have direct access to the page tables, so
+		 * use that instead.
+		 */
+		if (filter->type == DAMOS_FILTER_TYPE_YOUNG)
+			matched = damos_va_filter_young_match(filter, folio,
+					vma, addr, ptep, pmdp);
+		else
+			matched = damos_folio_filter_match(filter, folio);
+
+		if (matched)
+			return !filter->allow;
+	}
+	return scheme->ops_filters_default_reject;
+}
+
 struct damos_va_migrate_private {
 	struct list_head *migration_lists;
-	struct damos_migrate_dests *dests;
+	struct damos *scheme;
 };
 
 /*
@@ -673,7 +724,8 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	struct damos_va_migrate_private *priv = walk->private;
 	struct list_head *migration_lists = priv->migration_lists;
-	struct damos_migrate_dests *dests = priv->dests;
+	struct damos *s = priv->scheme;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	spinlock_t *ptl;
 	pmd_t pmde;
@@ -691,9 +743,13 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 	if (!folio)
 		goto unlock;
 
+	if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
+		goto put_folio;
+
 	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
 			migration_lists);
 
+put_folio:
 	folio_put(folio);
 unlock:
 	spin_unlock(ptl);
@@ -708,7 +764,8 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 {
 	struct damos_va_migrate_private *priv = walk->private;
 	struct list_head *migration_lists = priv->migration_lists;
-	struct damos_migrate_dests *dests = priv->dests;
+	struct damos *s = priv->scheme;
+	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	pte_t ptent;
 
@@ -720,9 +777,13 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
 	if (!folio)
 		return 0;
 
+	if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
+		goto put_folio;
+
 	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
 			migration_lists);
 
+put_folio:
 	folio_put(folio);
 	return 0;
 }
@@ -790,7 +851,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 	use_target_nid = dests->nr_dests == 0;
 	nr_dests = use_target_nid ? 1 : dests->nr_dests;
 
-	priv.dests = dests;
+	priv.scheme = s;
 	priv.migration_lists = kmalloc_array(nr_dests,
 			sizeof(*priv.migration_lists), GFP_KERNEL);
 	if (!priv.migration_lists)
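The allow/reject semantics of damos_va_filter_out() are easy to misread: the first ops filter that matches a folio decides the verdict (return !filter->allow), and ops_filters_default_reject applies only when no filter matched at all. Below is a minimal standalone userspace sketch of just that decision logic; struct filter, struct scheme, filter_out(), and the folio_young flag are simplified stand-ins invented for illustration, not the kernel's types or API.

/* filter_model.c: userspace model of the allow/reject decision above. */
#include <stdbool.h>
#include <stdio.h>

enum filter_type { FILTER_YOUNG, FILTER_OTHER };

struct filter {
	enum filter_type type;
	bool matching;		/* match folios that ARE young (true) or NOT (false) */
	bool allow;		/* on a match: pass the folio, or filter it out */
	struct filter *next;
};

struct scheme {
	bool core_filters_allowed;	/* core layer already gave a final "allow" */
	bool ops_filters_default_reject;	/* verdict when nothing matches */
	struct filter *ops_filters;
};

/* Mirrors the shape of damos_va_filter_out(): true means "filter out". */
static bool filter_out(struct scheme *s, bool folio_young)
{
	struct filter *f;

	if (s->core_filters_allowed)
		return false;

	for (f = s->ops_filters; f; f = f->next) {
		bool matched;

		if (f->type == FILTER_YOUNG)
			matched = (folio_young == f->matching);
		else
			matched = false;	/* other filter types elided here */

		if (matched)
			return !f->allow;	/* the first match decides */
	}
	return s->ops_filters_default_reject;
}

int main(void)
{
	/* One ops filter: match young folios and let them pass ... */
	struct filter young_allow = {
		.type = FILTER_YOUNG, .matching = true, .allow = true,
	};
	/* ... and reject everything that no filter matched. */
	struct scheme s = {
		.ops_filters_default_reject = true,
		.ops_filters = &young_allow,
	};

	printf("young folio filtered out?     %d\n", filter_out(&s, true));  /* 0 */
	printf("not-young folio filtered out? %d\n", filter_out(&s, false)); /* 1 */
	return 0;
}

The sketch leaves out one side effect of the real damos_va_filter_young_match(): when it observes the accessed bit as set, the kernel clears it again via damon_ptep_mkold() or damon_pmdp_mkold(), so DAMON's access sampling keeps working after the filter check.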