Diffstat (limited to 'mm/damon/paddr.c')
| -rw-r--r-- | mm/damon/paddr.c | 459 |
1 file changed, 285 insertions, 174 deletions
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 5e8244f65a1a..07a8aead439e 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * DAMON Primitives for The Physical Address Space
+ * DAMON Code for The Physical Address Space
  *
  * Author: SeongJae Park <sj@kernel.org>
  */
@@ -12,65 +12,48 @@
 #include <linux/pagemap.h>
 #include <linux/rmap.h>
 #include <linux/swap.h>
+#include <linux/memory-tiers.h>
+#include <linux/mm_inline.h>
 
 #include "../internal.h"
-#include "prmtv-common.h"
+#include "ops-common.h"
 
-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
-        unsigned long addr, void *arg)
+static phys_addr_t damon_pa_phys_addr(
+        unsigned long addr, unsigned long addr_unit)
 {
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = addr,
-        };
+        return (phys_addr_t)addr * addr_unit;
+}
 
-        while (page_vma_mapped_walk(&pvmw)) {
-                addr = pvmw.address;
-                if (pvmw.pte)
-                        damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
-                else
-                        damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
-        }
-        return true;
+static unsigned long damon_pa_core_addr(
+        phys_addr_t pa, unsigned long addr_unit)
+{
+        /*
+         * Use div_u64() for avoiding linking errors related with __udivdi3,
+         * __aeabi_uldivmod, or similar problems. This should also improve the
+         * performance optimization (read div_u64() comment for the detail).
+         */
+        if (sizeof(pa) == 8 && sizeof(addr_unit) == 4)
+                return div_u64(pa, addr_unit);
+        return pa / addr_unit;
 }
 
-static void damon_pa_mkold(unsigned long paddr)
+static void damon_pa_mkold(phys_addr_t paddr)
 {
-        struct page *page = damon_get_page(PHYS_PFN(paddr));
-        struct rmap_walk_control rwc = {
-                .rmap_one = __damon_pa_mkold,
-                .anon_lock = page_lock_anon_vma_read,
-        };
-        bool need_lock;
+        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 
-        if (!page)
+        if (!folio)
                 return;
 
-        if (!page_mapped(page) || !page_rmapping(page)) {
-                set_page_idle(page);
-                goto out;
-        }
-
-        need_lock = !PageAnon(page) || PageKsm(page);
-        if (need_lock && !trylock_page(page))
-                goto out;
-
-        rmap_walk(page, &rwc);
-
-        if (need_lock)
-                unlock_page(page);
-
-out:
-        put_page(page);
+        damon_folio_mkold(folio);
+        folio_put(folio);
 }
 
-static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
-                struct damon_region *r)
+static void __damon_pa_prepare_access_check(struct damon_region *r,
+                unsigned long addr_unit)
 {
         r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
 
-        damon_pa_mkold(r->sampling_addr);
+        damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, addr_unit));
 }
 
 static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
@@ -80,116 +63,44 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 
         damon_for_each_target(t, ctx) {
                 damon_for_each_region(r, t)
-                        __damon_pa_prepare_access_check(ctx, r);
-        }
-}
-
-struct damon_pa_access_chk_result {
-        unsigned long page_sz;
-        bool accessed;
-};
-
-static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
-                unsigned long addr, void *arg)
-{
-        struct damon_pa_access_chk_result *result = arg;
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = addr,
-        };
-
-        result->accessed = false;
-        result->page_sz = PAGE_SIZE;
-        while (page_vma_mapped_walk(&pvmw)) {
-                addr = pvmw.address;
-                if (pvmw.pte) {
-                        result->accessed = pte_young(*pvmw.pte) ||
-                                !page_is_idle(page) ||
-                                mmu_notifier_test_young(vma->vm_mm, addr);
-                } else {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                        result->accessed = pmd_young(*pvmw.pmd) ||
-                                !page_is_idle(page) ||
-                                mmu_notifier_test_young(vma->vm_mm, addr);
-                        result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
-#else
-                        WARN_ON_ONCE(1);
-#endif  /* CONFIG_TRANSPARENT_HUGEPAGE */
-                }
-                if (result->accessed) {
-                        page_vma_mapped_walk_done(&pvmw);
-                        break;
-                }
+                        __damon_pa_prepare_access_check(r, ctx->addr_unit);
         }
-
-        /* If accessed, stop walking */
-        return !result->accessed;
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+static bool damon_pa_young(phys_addr_t paddr, unsigned long *folio_sz)
 {
-        struct page *page = damon_get_page(PHYS_PFN(paddr));
-        struct damon_pa_access_chk_result result = {
-                .page_sz = PAGE_SIZE,
-                .accessed = false,
-        };
-        struct rmap_walk_control rwc = {
-                .arg = &result,
-                .rmap_one = __damon_pa_young,
-                .anon_lock = page_lock_anon_vma_read,
-        };
-        bool need_lock;
+        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+        bool accessed;
 
-        if (!page)
+        if (!folio)
                 return false;
 
-        if (!page_mapped(page) || !page_rmapping(page)) {
-                if (page_is_idle(page))
-                        result.accessed = false;
-                else
-                        result.accessed = true;
-                put_page(page);
-                goto out;
-        }
-
-        need_lock = !PageAnon(page) || PageKsm(page);
-        if (need_lock && !trylock_page(page)) {
-                put_page(page);
-                return NULL;
-        }
-
-        rmap_walk(page, &rwc);
-
-        if (need_lock)
-                unlock_page(page);
-        put_page(page);
-
-out:
-        *page_sz = result.page_sz;
-        return result.accessed;
+        accessed = damon_folio_young(folio);
+        *folio_sz = folio_size(folio);
+        folio_put(folio);
+        return accessed;
 }
 
-static void __damon_pa_check_access(struct damon_ctx *ctx,
-                struct damon_region *r)
+static void __damon_pa_check_access(struct damon_region *r,
+                struct damon_attrs *attrs, unsigned long addr_unit)
 {
-        static unsigned long last_addr;
-        static unsigned long last_page_sz = PAGE_SIZE;
+        static phys_addr_t last_addr;
+        static unsigned long last_folio_sz = PAGE_SIZE;
         static bool last_accessed;
+        phys_addr_t sampling_addr = damon_pa_phys_addr(
                        r->sampling_addr, addr_unit);
 
         /* If the region is in the last checked page, reuse the result */
-        if (ALIGN_DOWN(last_addr, last_page_sz) ==
-                                ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
-                if (last_accessed)
-                        r->nr_accesses++;
+        if (ALIGN_DOWN(last_addr, last_folio_sz) ==
+                        ALIGN_DOWN(sampling_addr, last_folio_sz)) {
+                damon_update_region_access_rate(r, last_accessed, attrs);
                 return;
         }
 
-        last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
-        if (last_accessed)
-                r->nr_accesses++;
+        last_accessed = damon_pa_young(sampling_addr, &last_folio_sz);
+        damon_update_region_access_rate(r, last_accessed, attrs);
 
-        last_addr = r->sampling_addr;
+        last_addr = sampling_addr;
 }
 
 static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
@@ -200,7 +111,8 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 
         damon_for_each_target(t, ctx) {
                 damon_for_each_region(r, t) {
-                        __damon_pa_check_access(ctx, r);
+                        __damon_pa_check_access(
+                                        r, &ctx->attrs, ctx->addr_unit);
                         max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
                 }
         }
@@ -208,43 +120,228 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
         return max_nr_accesses;
 }
 
-bool damon_pa_target_valid(void *t)
+/*
+ * damos_pa_filter_out - Return true if the page should be filtered out.
+ */
+static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
 {
-        return true;
+        struct damos_filter *filter;
+
+        if (scheme->core_filters_allowed)
+                return false;
+
+        damos_for_each_ops_filter(filter, scheme) {
+                if (damos_folio_filter_match(filter, folio))
+                        return !filter->allow;
+        }
+        return scheme->ops_filters_default_reject;
 }
 
-static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
-                struct damon_target *t, struct damon_region *r,
-                struct damos *scheme)
+static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
 {
-        unsigned long addr, applied;
-        LIST_HEAD(page_list);
-
-        if (scheme->action != DAMOS_PAGEOUT)
-                return 0;
+        if (!folio)
+                return true;
+        if (folio == s->last_applied) {
+                folio_put(folio);
+                return true;
+        }
+        return false;
+}
 
-        for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-                struct page *page = damon_get_page(PHYS_PFN(addr));
+static unsigned long damon_pa_pageout(struct damon_region *r,
+                unsigned long addr_unit, struct damos *s,
+                unsigned long *sz_filter_passed)
+{
+        phys_addr_t addr, applied;
+        LIST_HEAD(folio_list);
+        bool install_young_filter = true;
+        struct damos_filter *filter;
+        struct folio *folio;
+
+        /* check access in page level again by default */
+        damos_for_each_ops_filter(filter, s) {
+                if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
+                        install_young_filter = false;
+                        break;
+                }
+        }
+        if (install_young_filter) {
+                filter = damos_new_filter(
+                                DAMOS_FILTER_TYPE_YOUNG, true, false);
+                if (!filter)
+                        return 0;
+                damos_add_filter(s, filter);
+        }
 
-                if (!page)
+        addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+        while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
+                folio = damon_get_folio(PHYS_PFN(addr));
+                if (damon_pa_invalid_damos_folio(folio, s)) {
+                        addr += PAGE_SIZE;
                         continue;
+                }
+
+                if (damos_pa_filter_out(s, folio))
+                        goto put_folio;
+                else
+                        *sz_filter_passed += folio_size(folio) / addr_unit;
+
+                folio_clear_referenced(folio);
+                folio_test_clear_young(folio);
+                if (!folio_isolate_lru(folio))
+                        goto put_folio;
+                if (folio_test_unevictable(folio))
+                        folio_putback_lru(folio);
+                else
+                        list_add(&folio->lru, &folio_list);
+put_folio:
+                addr += folio_size(folio);
+                folio_put(folio);
+        }
+        if (install_young_filter)
+                damos_destroy_filter(filter);
+        applied = reclaim_pages(&folio_list);
+        cond_resched();
+        s->last_applied = folio;
+        return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
+}
 
-                ClearPageReferenced(page);
-                test_and_clear_page_young(page);
-                if (isolate_lru_page(page)) {
-                        put_page(page);
+static inline unsigned long damon_pa_mark_accessed_or_deactivate(
+                struct damon_region *r, unsigned long addr_unit,
+                struct damos *s, bool mark_accessed,
+                unsigned long *sz_filter_passed)
+{
+        phys_addr_t addr, applied = 0;
+        struct folio *folio;
+
+        addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+        while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
+                folio = damon_get_folio(PHYS_PFN(addr));
+                if (damon_pa_invalid_damos_folio(folio, s)) {
+                        addr += PAGE_SIZE;
                         continue;
                 }
-                if (PageUnevictable(page)) {
-                        putback_lru_page(page);
-                } else {
-                        list_add(&page->lru, &page_list);
-                        put_page(page);
+
+                if (damos_pa_filter_out(s, folio))
+                        goto put_folio;
+                else
+                        *sz_filter_passed += folio_size(folio) / addr_unit;
+
+                if (mark_accessed)
+                        folio_mark_accessed(folio);
+                else
+                        folio_deactivate(folio);
+                applied += folio_nr_pages(folio);
+put_folio:
+                addr += folio_size(folio);
+                folio_put(folio);
+        }
+        s->last_applied = folio;
+        return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
+}
+
+static unsigned long damon_pa_mark_accessed(struct damon_region *r,
+                unsigned long addr_unit, struct damos *s,
+                unsigned long *sz_filter_passed)
+{
+        return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, true,
+                        sz_filter_passed);
+}
+
+static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
+                unsigned long addr_unit, struct damos *s,
+                unsigned long *sz_filter_passed)
+{
+        return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, false,
+                        sz_filter_passed);
+}
+
+static unsigned long damon_pa_migrate(struct damon_region *r,
+                unsigned long addr_unit, struct damos *s,
+                unsigned long *sz_filter_passed)
+{
+        phys_addr_t addr, applied;
+        LIST_HEAD(folio_list);
+        struct folio *folio;
+
+        addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+        while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
+                folio = damon_get_folio(PHYS_PFN(addr));
+                if (damon_pa_invalid_damos_folio(folio, s)) {
+                        addr += PAGE_SIZE;
+                        continue;
                 }
+
+                if (damos_pa_filter_out(s, folio))
+                        goto put_folio;
+                else
+                        *sz_filter_passed += folio_size(folio) / addr_unit;
+
+                if (!folio_isolate_lru(folio))
+                        goto put_folio;
+                list_add(&folio->lru, &folio_list);
+put_folio:
+                addr += folio_size(folio);
+                folio_put(folio);
         }
-        applied = reclaim_pages(&page_list);
+        applied = damon_migrate_pages(&folio_list, s->target_nid);
         cond_resched();
-        return applied * PAGE_SIZE;
+        s->last_applied = folio;
+        return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
+}
+
+static unsigned long damon_pa_stat(struct damon_region *r,
+                unsigned long addr_unit, struct damos *s,
+                unsigned long *sz_filter_passed)
+{
+        phys_addr_t addr;
+        struct folio *folio;
+
+        if (!damos_ops_has_filter(s))
+                return 0;
+
+        addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+        while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
+                folio = damon_get_folio(PHYS_PFN(addr));
+                if (damon_pa_invalid_damos_folio(folio, s)) {
+                        addr += PAGE_SIZE;
+                        continue;
+                }
+
+                if (!damos_pa_filter_out(s, folio))
+                        *sz_filter_passed += folio_size(folio) / addr_unit;
+                addr += folio_size(folio);
+                folio_put(folio);
+        }
+        s->last_applied = folio;
+        return 0;
+}
+
+static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
+                struct damon_target *t, struct damon_region *r,
+                struct damos *scheme, unsigned long *sz_filter_passed)
+{
+        unsigned long aunit = ctx->addr_unit;
+
+        switch (scheme->action) {
+        case DAMOS_PAGEOUT:
+                return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
+        case DAMOS_LRU_PRIO:
+                return damon_pa_mark_accessed(r, aunit, scheme,
+                                sz_filter_passed);
+        case DAMOS_LRU_DEPRIO:
+                return damon_pa_deactivate_pages(r, aunit, scheme,
+                                sz_filter_passed);
+        case DAMOS_MIGRATE_HOT:
+        case DAMOS_MIGRATE_COLD:
+                return damon_pa_migrate(r, aunit, scheme, sz_filter_passed);
+        case DAMOS_STAT:
+                return damon_pa_stat(r, aunit, scheme, sz_filter_passed);
+        default:
+                /* DAMOS actions that not yet supported by 'paddr'. */
+                break;
+        }
+        return 0;
 }
 
 static int damon_pa_scheme_score(struct damon_ctx *context,
@@ -253,7 +350,15 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
 {
         switch (scheme->action) {
         case DAMOS_PAGEOUT:
-                return damon_pageout_score(context, r, scheme);
+                return damon_cold_score(context, r, scheme);
+        case DAMOS_LRU_PRIO:
+                return damon_hot_score(context, r, scheme);
+        case DAMOS_LRU_DEPRIO:
+                return damon_cold_score(context, r, scheme);
+        case DAMOS_MIGRATE_HOT:
+                return damon_hot_score(context, r, scheme);
+        case DAMOS_MIGRATE_COLD:
+                return damon_cold_score(context, r, scheme);
         default:
                 break;
         }
@@ -261,15 +366,21 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
         return DAMOS_MAX_SCORE;
 }
 
-void damon_pa_set_primitives(struct damon_ctx *ctx)
+static int __init damon_pa_initcall(void)
 {
-        ctx->primitive.init = NULL;
-        ctx->primitive.update = NULL;
-        ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
-        ctx->primitive.check_accesses = damon_pa_check_accesses;
-        ctx->primitive.reset_aggregated = NULL;
-        ctx->primitive.target_valid = damon_pa_target_valid;
-        ctx->primitive.cleanup = NULL;
-        ctx->primitive.apply_scheme = damon_pa_apply_scheme;
-        ctx->primitive.get_scheme_score = damon_pa_scheme_score;
-}
+        struct damon_operations ops = {
+                .id = DAMON_OPS_PADDR,
+                .init = NULL,
+                .update = NULL,
+                .prepare_access_checks = damon_pa_prepare_access_checks,
+                .check_accesses = damon_pa_check_accesses,
+                .target_valid = NULL,
+                .cleanup = NULL,
+                .apply_scheme = damon_pa_apply_scheme,
+                .get_scheme_score = damon_pa_scheme_score,
+        };
+
+        return damon_register_ops(&ops);
+};
+
+subsys_initcall(damon_pa_initcall);
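
For reference, the address scaling introduced above is small enough to illustrate standalone. The following sketch is not part of the patch: it mirrors damon_pa_phys_addr() and damon_pa_core_addr() in plain userspace C, with uint64_t standing in for phys_addr_t, plain division standing in for div_u64(), and a hypothetical addr_unit of 16, to show how a 32-bit unsigned long core address can reach physical addresses beyond 4 GiB when addr_unit is greater than 1.

/*
 * Illustrative sketch only -- not part of the patch above.  Mirrors
 * damon_pa_phys_addr()/damon_pa_core_addr() with standard C types; the
 * kernel version uses phys_addr_t and div_u64() (the latter to avoid
 * __udivdi3/__aeabi_uldivmod link errors on 32-bit builds).
 */
#include <stdint.h>
#include <stdio.h>

/* DAMON core address -> physical address */
static uint64_t core_to_phys(unsigned long core_addr, unsigned long addr_unit)
{
        return (uint64_t)core_addr * addr_unit;
}

/* physical address -> DAMON core address */
static unsigned long phys_to_core(uint64_t pa, unsigned long addr_unit)
{
        return (unsigned long)(pa / addr_unit);
}

int main(void)
{
        unsigned long addr_unit = 16;   /* hypothetical: 16 bytes per core-address unit */
        unsigned long core_addr = 0x80000000UL;        /* example core-space address */
        uint64_t pa = core_to_phys(core_addr, addr_unit);

        /* 0x80000000 * 16 = 0x800000000, i.e. an offset of 32 GiB */
        printf("core 0x%lx -> phys 0x%llx -> core 0x%lx\n", core_addr,
               (unsigned long long)pa, phys_to_core(pa, addr_unit));
        return 0;
}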
