Diffstat (limited to 'mm/damon/paddr.c')
-rw-r--r--  mm/damon/paddr.c  316
1 file changed, 278 insertions(+), 38 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 081e2a325778..c834aa217835 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -12,12 +12,15 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
+#include <linux/memory-tiers.h>
+#include <linux/migrate.h>
+#include <linux/mm_inline.h>
#include "../internal.h"
#include "ops-common.h"
-static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
- unsigned long addr, void *arg)
+static bool damon_folio_mkold_one(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr, void *arg)
{
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
@@ -31,33 +34,38 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
return true;
}
-static void damon_pa_mkold(unsigned long paddr)
+static void damon_folio_mkold(struct folio *folio)
{
- struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
struct rmap_walk_control rwc = {
- .rmap_one = __damon_pa_mkold,
+ .rmap_one = damon_folio_mkold_one,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
- if (!folio)
- return;
-
if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
folio_set_idle(folio);
- goto out;
+ return;
}
need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
if (need_lock && !folio_trylock(folio))
- goto out;
+ return;
rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
-out:
+}
+
+static void damon_pa_mkold(unsigned long paddr)
+{
+ struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+
+ if (!folio)
+ return;
+
+ damon_folio_mkold(folio);
folio_put(folio);
}
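/*
 * Sketch (not part of this patch): after this split, damon_pa_mkold()
 * keeps only the paddr-to-folio lookup and refcounting, while
 * damon_folio_mkold() does the actual aging on a folio the caller
 * already holds.  The DAMOS_FILTER_TYPE_YOUNG handling added further
 * below reuses it exactly that way:
 *
 *	matched = damon_folio_young(folio);
 *	if (matched)
 *		damon_folio_mkold(folio);
 */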
@@ -79,8 +87,8 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
}
}
-static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
- unsigned long addr, void *arg)
+static bool damon_folio_young_one(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr, void *arg)
{
bool *accessed = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
@@ -111,38 +119,44 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
return *accessed == false;
}
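/*
 * Sketch (not part of this patch): an rmap_one callback returns true
 * to keep the rmap walk going.  damon_folio_mkold_one() always
 * continues, while damon_folio_young_one() cuts the walk short as
 * soon as one mapping shows an access:
 *
 *	return *accessed == false;	// stop once accessed goes true
 */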
-static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
+static bool damon_folio_young(struct folio *folio)
{
- struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
bool accessed = false;
struct rmap_walk_control rwc = {
.arg = &accessed,
- .rmap_one = __damon_pa_young,
+ .rmap_one = damon_folio_young_one,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
- if (!folio)
- return false;
-
if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
if (folio_test_idle(folio))
- accessed = false;
+ return false;
else
- accessed = true;
- goto out;
+ return true;
}
need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
if (need_lock && !folio_trylock(folio))
- goto out;
+ return false;
rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
-out:
+ return accessed;
+}
+
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
+{
+ struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+ bool accessed;
+
+ if (!folio)
+ return false;
+
+ accessed = damon_folio_young(folio);
*folio_sz = folio_size(folio);
folio_put(folio);
return accessed;
@@ -184,7 +198,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
return max_nr_accesses;
}
-static bool __damos_pa_filter_out(struct damos_filter *filter,
+static bool damos_pa_filter_match(struct damos_filter *filter,
struct folio *folio)
{
bool matched = false;
@@ -203,6 +217,11 @@ static bool __damos_pa_filter_out(struct damos_filter *filter,
matched = filter->memcg_id == mem_cgroup_id(memcg);
rcu_read_unlock();
break;
+ case DAMOS_FILTER_TYPE_YOUNG:
+ matched = damon_folio_young(folio);
+ if (matched)
+ damon_folio_mkold(folio);
+ break;
default:
break;
}
@@ -217,17 +236,38 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
struct damos_filter *filter;
+ if (scheme->core_filters_allowed)
+ return false;
+
damos_for_each_filter(filter, scheme) {
- if (__damos_pa_filter_out(filter, folio))
- return true;
+ if (damos_pa_filter_match(filter, folio))
+ return !filter->allow;
}
return false;
}
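/*
 * Filter-semantics sketch (illustration, not part of this patch): the
 * first filter matching a folio decides its fate.  Matching an allow
 * filter returns !filter->allow == false, i.e. the folio passes;
 * matching a reject filter returns true and the folio is skipped.
 * Folios matching nothing pass, and if a core-layer filter already
 * allowed the target (core_filters_allowed), ops-level filters are
 * not consulted at all.  A hypothetical reject filter:
 *
 *	struct damos_filter *f = damos_new_filter(
 *			DAMOS_FILTER_TYPE_ANON, true, false);
 *	damos_add_filter(scheme, f);	// anon folios now filtered out
 */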
-static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
+static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
+ unsigned long *sz_filter_passed)
{
unsigned long addr, applied;
LIST_HEAD(folio_list);
+ bool install_young_filter = true;
+ struct damos_filter *filter;
+
+ /* re-check access at the page level by default */
+ damos_for_each_filter(filter, s) {
+ if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
+ install_young_filter = false;
+ break;
+ }
+ }
+ if (install_young_filter) {
+ filter = damos_new_filter(
+ DAMOS_FILTER_TYPE_YOUNG, true, false);
+ if (!filter)
+ return 0;
+ damos_add_filter(s, filter);
+ }
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
struct folio *folio = damon_get_folio(PHYS_PFN(addr));
@@ -237,6 +277,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
if (damos_pa_filter_out(s, folio))
goto put_folio;
+ else
+ *sz_filter_passed += folio_size(folio);
folio_clear_referenced(folio);
folio_test_clear_young(folio);
@@ -249,13 +291,16 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
put_folio:
folio_put(folio);
}
+ if (install_young_filter)
+ damos_destroy_filter(filter);
applied = reclaim_pages(&folio_list);
cond_resched();
return applied * PAGE_SIZE;
}
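/*
 * Sketch (not part of this patch): unless the user already installed
 * a DAMOS_FILTER_TYPE_YOUNG filter, pageout temporarily adds one with
 * matching=true and allow=false, so folios found still young at page
 * level are rejected from reclaim.  Because the YOUNG case of
 * damos_pa_filter_match() also calls damon_folio_mkold() on a match,
 * one observed access is consumed by one check instead of shielding
 * the folio from every later pageout pass.  The temporary filter is
 * destroyed once the region walk is done.
 */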
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
- struct damon_region *r, struct damos *s, bool mark_accessed)
+ struct damon_region *r, struct damos *s, bool mark_accessed,
+ unsigned long *sz_filter_passed)
{
unsigned long addr, applied = 0;
@@ -267,6 +312,8 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
if (damos_pa_filter_out(s, folio))
goto put_folio;
+ else
+ *sz_filter_passed += folio_size(folio);
if (mark_accessed)
folio_mark_accessed(folio);
@@ -280,30 +327,219 @@ put_folio:
}
static unsigned long damon_pa_mark_accessed(struct damon_region *r,
- struct damos *s)
+ struct damos *s, unsigned long *sz_filter_passed)
{
- return damon_pa_mark_accessed_or_deactivate(r, s, true);
+ return damon_pa_mark_accessed_or_deactivate(r, s, true,
+ sz_filter_passed);
}
static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
- struct damos *s)
+ struct damos *s, unsigned long *sz_filter_passed)
+{
+ return damon_pa_mark_accessed_or_deactivate(r, s, false,
+ sz_filter_passed);
+}
+
+static unsigned int __damon_pa_migrate_folio_list(
+ struct list_head *migrate_folios, struct pglist_data *pgdat,
+ int target_nid)
+{
+ unsigned int nr_succeeded = 0;
+ nodemask_t allowed_mask = NODE_MASK_NONE;
+ struct migration_target_control mtc = {
+ /*
+ * Allocate from the target node, or fail quickly and quietly.
+ * When this happens, the folio will likely just be put back on
+ * the LRU instead of migrated.
+ */
+ .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
+ __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
+ .nid = target_nid,
+ .nmask = &allowed_mask
+ };
+
+ if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
+ return 0;
+
+ if (list_empty(migrate_folios))
+ return 0;
+
+ /* Migration ignores all cpuset and mempolicy settings */
+ migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
+ (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
+ &nr_succeeded);
+
+ return nr_succeeded;
+}
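/*
 * GFP-mask sketch (explanatory, not part of this patch): the mask is
 * GFP_HIGHUSER_MOVABLE with __GFP_RECLAIM stripped and GFP_NOWAIT
 * or'd back in (kswapd may be woken, but nothing blocks), plus
 * __GFP_NOWARN and __GFP_NOMEMALLOC.  Allocation on the target node
 * therefore either succeeds immediately or fails fast, without
 * reclaim stalls or allocation-failure splats, and MIGRATE_ASYNC
 * keeps the migration itself non-blocking as well.
 */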
+
+static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
+ struct pglist_data *pgdat,
+ int target_nid)
+{
+ unsigned int nr_migrated = 0;
+ struct folio *folio;
+ LIST_HEAD(ret_folios);
+ LIST_HEAD(migrate_folios);
+
+ while (!list_empty(folio_list)) {
+ struct folio *folio;
+
+ cond_resched();
+
+ folio = lru_to_folio(folio_list);
+ list_del(&folio->lru);
+
+ if (!folio_trylock(folio))
+ goto keep;
+
+ /* Relocate its contents to another node. */
+ list_add(&folio->lru, &migrate_folios);
+ folio_unlock(folio);
+ continue;
+keep:
+ list_add(&folio->lru, &ret_folios);
+ }
+ /* 'folio_list' is always empty here */
+
+ /* Migrate folios selected for migration */
+ nr_migrated += __damon_pa_migrate_folio_list(
+ &migrate_folios, pgdat, target_nid);
+ /*
+ * Folios that could not be migrated are still in @migrate_folios. Add
+ * those back on @folio_list
+ */
+ if (!list_empty(&migrate_folios))
+ list_splice_init(&migrate_folios, folio_list);
+
+ try_to_unmap_flush();
+
+ list_splice(&ret_folios, folio_list);
+
+ while (!list_empty(folio_list)) {
+ folio = lru_to_folio(folio_list);
+ list_del(&folio->lru);
+ folio_putback_lru(folio);
+ }
+
+ return nr_migrated;
+}
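/*
 * Sketch (not part of this patch): this looks like a trimmed-down
 * sibling of reclaim's demotion path.  Folios that cannot be
 * trylocked are set aside, the rest go to migrate_pages(), and
 * whatever could not be migrated is spliced back and returned to the
 * LRU via folio_putback_lru().
 */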
+
+static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
+ int target_nid)
+{
+ int nid;
+ unsigned long nr_migrated = 0;
+ LIST_HEAD(node_folio_list);
+ unsigned int noreclaim_flag;
+
+ if (list_empty(folio_list))
+ return nr_migrated;
+
+ noreclaim_flag = memalloc_noreclaim_save();
+
+ nid = folio_nid(lru_to_folio(folio_list));
+ do {
+ struct folio *folio = lru_to_folio(folio_list);
+
+ if (nid == folio_nid(folio)) {
+ list_move(&folio->lru, &node_folio_list);
+ continue;
+ }
+
+ nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
+ NODE_DATA(nid),
+ target_nid);
+ nid = folio_nid(lru_to_folio(folio_list));
+ } while (!list_empty(folio_list));
+
+ nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
+ NODE_DATA(nid),
+ target_nid);
+
+ memalloc_noreclaim_restore(noreclaim_flag);
+
+ return nr_migrated;
+}
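/*
 * Batching sketch (illustration, not part of this patch): the loop
 * drains folio_list in runs that share a source node, so
 * migrate_pages() runs once per contiguous run rather than per folio.
 * With folios on nodes [0, 0, 1, 1, 0], for example:
 *
 *	run {0, 0} -> damon_pa_migrate_folio_list(.., NODE_DATA(0), ..)
 *	run {1, 1} -> damon_pa_migrate_folio_list(.., NODE_DATA(1), ..)
 *	run {0}    -> final call after the loop
 */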
+
+static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
+ unsigned long *sz_filter_passed)
+{
+ unsigned long addr, applied;
+ LIST_HEAD(folio_list);
+
+ for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
+ struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+ if (!folio)
+ continue;
+
+ if (damos_pa_filter_out(s, folio))
+ goto put_folio;
+ else
+ *sz_filter_passed += folio_size(folio);
+
+ if (!folio_isolate_lru(folio))
+ goto put_folio;
+ list_add(&folio->lru, &folio_list);
+put_folio:
+ folio_put(folio);
+ }
+ applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
+ cond_resched();
+ return applied * PAGE_SIZE;
+}
+
+static bool damon_pa_scheme_has_filter(struct damos *s)
{
- return damon_pa_mark_accessed_or_deactivate(r, s, false);
+ struct damos_filter *f;
+
+ damos_for_each_filter(f, s)
+ return true;
+ return false;
+}
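/*
 * Sketch (not part of this patch): entering the damos_for_each_filter
 * body at all means the filter list is non-empty, so returning true
 * from the first iteration is a cheap emptiness check in disguise.
 */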
+
+static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
+ unsigned long *sz_filter_passed)
+{
+ unsigned long addr;
+ LIST_HEAD(folio_list);
+
+ if (!damon_pa_scheme_has_filter(s))
+ return 0;
+
+ addr = r->ar.start;
+ while (addr < r->ar.end) {
+ struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+ if (!folio) {
+ addr += PAGE_SIZE;
+ continue;
+ }
+
+ if (!damos_pa_filter_out(s, folio))
+ *sz_filter_passed += folio_size(folio);
+ addr += folio_size(folio);
+ folio_put(folio);
+ }
+ return 0;
}
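/*
 * Sketch (not part of this patch): DAMOS_STAT applies no action, so
 * this walk exists purely to account sz_filter_passed; with no
 * filters installed there is nothing to measure and it returns early.
 * Note the stride: addr advances by folio_size(), so a large folio is
 * looked up and tested once rather than once per PAGE_SIZE.
 */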
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
struct damon_target *t, struct damon_region *r,
- struct damos *scheme)
+ struct damos *scheme, unsigned long *sz_filter_passed)
{
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pa_pageout(r, scheme);
+ return damon_pa_pageout(r, scheme, sz_filter_passed);
case DAMOS_LRU_PRIO:
- return damon_pa_mark_accessed(r, scheme);
+ return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
case DAMOS_LRU_DEPRIO:
- return damon_pa_deactivate_pages(r, scheme);
+ return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
+ case DAMOS_MIGRATE_HOT:
+ case DAMOS_MIGRATE_COLD:
+ return damon_pa_migrate(r, scheme, sz_filter_passed);
case DAMOS_STAT:
- break;
+ return damon_pa_stat(r, scheme, sz_filter_passed);
default:
/* DAMOS actions that are not yet supported by 'paddr'. */
break;
@@ -322,6 +558,10 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
return damon_hot_score(context, r, scheme);
case DAMOS_LRU_DEPRIO:
return damon_cold_score(context, r, scheme);
+ case DAMOS_MIGRATE_HOT:
+ return damon_hot_score(context, r, scheme);
+ case DAMOS_MIGRATE_COLD:
+ return damon_cold_score(context, r, scheme);
default:
break;
}
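/*
 * Score-mapping sketch (not part of this patch): for quota-based
 * prioritization, DAMOS_MIGRATE_HOT reuses damon_hot_score() (hotter
 * regions are migrated sooner) while DAMOS_MIGRATE_COLD reuses
 * damon_cold_score(), mirroring the LRU_PRIO/LRU_DEPRIO pairing
 * above.
 */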