Diffstat (limited to 'mm/damon')
-rw-r--r--  mm/damon/Kconfig               2
-rw-r--r--  mm/damon/core.c              111
-rw-r--r--  mm/damon/lru_sort.c           51
-rw-r--r--  mm/damon/ops-common.c         11
-rw-r--r--  mm/damon/ops-common.h          2
-rw-r--r--  mm/damon/paddr.c             130
-rw-r--r--  mm/damon/reclaim.c            49
-rw-r--r--  mm/damon/stat.c               26
-rw-r--r--  mm/damon/sysfs.c              48
-rw-r--r--  mm/damon/tests/core-kunit.h   38
-rw-r--r--  mm/damon/tests/vaddr-kunit.h   2
-rw-r--r--  mm/damon/vaddr.c             105
12 files changed, 458 insertions(+), 117 deletions(-)
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index b3171f9406c1..8c868f7035fc 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -104,7 +104,7 @@ config DAMON_STAT
 
 config DAMON_STAT_ENABLED_DEFAULT
 	bool "Enable DAMON_STAT by default"
-	depends on DAMON_PADDR
+	depends on DAMON_STAT
 	default DAMON_STAT
 	help
 	  Whether to enable DAMON_STAT by default. Users can disable it in
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 08065b363972..93848b4c6944 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -201,6 +201,7 @@ static int damon_fill_regions_holes(struct damon_region *first,
  * @t:		the given target.
  * @ranges:	array of new monitoring target ranges.
  * @nr_ranges:	length of @ranges.
+ * @min_sz_region:	minimum region size.
  *
  * This function adds new regions to, or modify existing regions of a
  * monitoring target to fit in specific ranges.
@@ -208,7 +209,7 @@ static int damon_fill_regions_holes(struct damon_region *first,
  * Return: 0 if success, or negative error code otherwise.
  */
 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
-		unsigned int nr_ranges)
+		unsigned int nr_ranges, unsigned long min_sz_region)
 {
 	struct damon_region *r, *next;
 	unsigned int i;
@@ -245,16 +246,16 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
 			/* no region intersects with this range */
 			newr = damon_new_region(
 					ALIGN_DOWN(range->start,
-						DAMON_MIN_REGION),
-					ALIGN(range->end, DAMON_MIN_REGION));
+						min_sz_region),
+					ALIGN(range->end, min_sz_region));
 			if (!newr)
 				return -ENOMEM;
 			damon_insert_region(newr, damon_prev_region(r), r, t);
 		} else {
 			/* resize intersecting regions to fit in this range */
 			first->ar.start = ALIGN_DOWN(range->start,
-					DAMON_MIN_REGION);
-			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
+					min_sz_region);
+			last->ar.end = ALIGN(range->end, min_sz_region);
 
 			/* fill possible holes in the range */
 			err = damon_fill_regions_holes(first, last, t);
@@ -544,6 +545,9 @@ struct damon_ctx *damon_new_ctx(void)
 	ctx->attrs.min_nr_regions = 10;
 	ctx->attrs.max_nr_regions = 1000;
 
+	ctx->addr_unit = 1;
+	ctx->min_sz_region = DAMON_MIN_REGION;
+
 	INIT_LIST_HEAD(&ctx->adaptive_targets);
 	INIT_LIST_HEAD(&ctx->schemes);
 
@@ -570,6 +574,23 @@ void damon_destroy_ctx(struct damon_ctx *ctx)
 	kfree(ctx);
 }
 
+static bool damon_attrs_equals(const struct damon_attrs *attrs1,
+		const struct damon_attrs *attrs2)
+{
+	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
+	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;
+
+	return attrs1->sample_interval == attrs2->sample_interval &&
+		attrs1->aggr_interval == attrs2->aggr_interval &&
+		attrs1->ops_update_interval == attrs2->ops_update_interval &&
+		attrs1->min_nr_regions == attrs2->min_nr_regions &&
+		attrs1->max_nr_regions == attrs2->max_nr_regions &&
+		ig1->access_bp == ig2->access_bp &&
+		ig1->aggrs == ig2->aggrs &&
+		ig1->min_sample_us == ig2->min_sample_us &&
+		ig1->max_sample_us == ig2->max_sample_us;
+}
+
 static unsigned int damon_age_for_new_attrs(unsigned int age,
 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
 {
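The hunks above thread a caller-chosen min_sz_region through damon_set_regions() in place of the fixed DAMON_MIN_REGION constant. Below is a standalone userspace sketch of the alignment arithmetic, not part of the patch: the macros are division-based stand-ins for the kernel's ALIGN()/ALIGN_DOWN(), and the numbers are made up.

#include <stdio.h>

/* division-based stand-ins for the kernel's ALIGN()/ALIGN_DOWN() macros */
#define ALIGN_DOWN(x, a)	((x) / (a) * (a))
#define ALIGN(x, a)		((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned long start = 10000, end = 30000;
	unsigned long min_sz_region = 4096;	/* models DAMON_MIN_REGION */

	/* a requested range is widened to region-size boundaries */
	printf("[%lu, %lu)\n", ALIGN_DOWN(start, min_sz_region),
			ALIGN(end, min_sz_region));	/* [8192, 32768) */
	return 0;
}

With addr_unit scaling in use, a paddr context passes a proportionally smaller min_sz_region, so the same code aligns to the scaled granularity.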
@@ -1108,8 +1129,8 @@ static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
  *
  * If @src has no region, @dst keeps current regions.
  */
-static int damon_commit_target_regions(
-		struct damon_target *dst, struct damon_target *src)
+static int damon_commit_target_regions(struct damon_target *dst,
+		struct damon_target *src, unsigned long src_min_sz_region)
 {
 	struct damon_region *src_region;
 	struct damon_addr_range *ranges;
@@ -1126,18 +1147,19 @@ static int damon_commit_target_regions(
 	i = 0;
 	damon_for_each_region(src_region, src)
 		ranges[i++] = src_region->ar;
-	err = damon_set_regions(dst, ranges, i);
+	err = damon_set_regions(dst, ranges, i, src_min_sz_region);
 	kfree(ranges);
 	return err;
 }
 
 static int damon_commit_target(
 		struct damon_target *dst, bool dst_has_pid,
-		struct damon_target *src, bool src_has_pid)
+		struct damon_target *src, bool src_has_pid,
+		unsigned long src_min_sz_region)
 {
 	int err;
 
-	err = damon_commit_target_regions(dst, src);
+	err = damon_commit_target_regions(dst, src, src_min_sz_region);
 	if (err)
 		return err;
 	if (dst_has_pid)
@@ -1159,7 +1181,8 @@ static int damon_commit_targets(
 		if (src_target) {
 			err = damon_commit_target(
 					dst_target, damon_target_has_pid(dst),
-					src_target, damon_target_has_pid(src));
+					src_target, damon_target_has_pid(src),
+					src->min_sz_region);
 			if (err)
 				return err;
 		} else {
@@ -1182,7 +1205,8 @@ static int damon_commit_targets(
 			if (!new_target)
 				return -ENOMEM;
 			err = damon_commit_target(new_target, false,
-					src_target, damon_target_has_pid(src));
+					src_target, damon_target_has_pid(src),
+					src->min_sz_region);
 			if (err) {
 				damon_destroy_target(new_target, NULL);
 				return err;
@@ -1222,10 +1246,14 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
 	 * 2. ops update should be done after pid handling is done (target
 	 *    committing require putting pids).
 	 */
-	err = damon_set_attrs(dst, &src->attrs);
-	if (err)
-		return err;
+	if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
+		err = damon_set_attrs(dst, &src->attrs);
+		if (err)
+			return err;
+	}
 	dst->ops = src->ops;
+	dst->addr_unit = src->addr_unit;
+	dst->min_sz_region = src->min_sz_region;
 
 	return 0;
 }
@@ -1258,8 +1286,8 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
 	if (ctx->attrs.min_nr_regions)
 		sz /= ctx->attrs.min_nr_regions;
-	if (sz < DAMON_MIN_REGION)
-		sz = DAMON_MIN_REGION;
+	if (sz < ctx->min_sz_region)
+		sz = ctx->min_sz_region;
 
 	return sz;
 }
@@ -1603,6 +1631,7 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
  * @t:	The target of the region.
  * @rp:	The pointer to the region.
  * @s:	The scheme to be applied.
+ * @min_sz_region:	minimum region size.
  *
  * If a quota of a scheme has exceeded in a quota charge window, the scheme's
  * action would applied to only a part of the target access pattern fulfilling
@@ -1620,7 +1649,7 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
  * Return: true if the region should be entirely skipped, false otherwise.
  */
 static bool damos_skip_charged_region(struct damon_target *t,
-		struct damon_region **rp, struct damos *s)
+		struct damon_region **rp, struct damos *s, unsigned long min_sz_region)
 {
 	struct damon_region *r = *rp;
 	struct damos_quota *quota = &s->quota;
@@ -1642,11 +1671,11 @@ static bool damos_skip_charged_region(struct damon_target *t,
 	if (quota->charge_addr_from && r->ar.start <
 			quota->charge_addr_from) {
 		sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
-				r->ar.start, DAMON_MIN_REGION);
+				r->ar.start, min_sz_region);
 		if (!sz_to_skip) {
-			if (damon_sz_region(r) <= DAMON_MIN_REGION)
+			if (damon_sz_region(r) <= min_sz_region)
 				return true;
-			sz_to_skip = DAMON_MIN_REGION;
+			sz_to_skip = min_sz_region;
 		}
 		damon_split_region_at(t, r, sz_to_skip);
 		r = damon_next_region(r);
@@ -1671,7 +1700,8 @@ static void damos_update_stat(struct damos *s,
 }
 
 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
-		struct damon_region *r, struct damos_filter *filter)
+		struct damon_region *r, struct damos_filter *filter,
+		unsigned long min_sz_region)
 {
 	bool matched = false;
 	struct damon_target *ti;
@@ -1688,8 +1718,8 @@ static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
 		matched = target_idx == filter->target_idx;
 		break;
 	case DAMOS_FILTER_TYPE_ADDR:
-		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
-		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
+		start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
+		end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);
 
 		/* inside the range */
 		if (start <= r->ar.start && r->ar.end <= end) {
@@ -1725,7 +1755,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
 
 	s->core_filters_allowed = false;
 	damos_for_each_filter(filter, s) {
-		if (damos_filter_match(ctx, t, r, filter)) {
+		if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
 			if (filter->allow)
 				s->core_filters_allowed = true;
 			return !filter->allow;
@@ -1860,7 +1890,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 	if (c->ops.apply_scheme) {
 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
-					DAMON_MIN_REGION);
+					c->min_sz_region);
 			if (!sz)
 				goto update_stat;
 			damon_split_region_at(t, r, sz);
@@ -1908,7 +1938,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
 		if (quota->esz && quota->charged_sz >= quota->esz)
 			continue;
 
-		if (damos_skip_charged_region(t, &r, s))
+		if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
 			continue;
 
 		if (!damos_valid_target(c, t, r, s))
@@ -2112,8 +2142,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
 		return;
 
 	/* First charge window */
-	if (!quota->total_charged_sz && !quota->charged_from)
+	if (!quota->total_charged_sz && !quota->charged_from) {
 		quota->charged_from = jiffies;
+		damos_set_effective_quota(quota);
+	}
 
 	/* New charge window starts */
 	if (time_after_eq(jiffies, quota->charged_from +
@@ -2231,6 +2263,8 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
 	damon_for_each_region_safe(r, next, t) {
 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
 			r->age = 0;
+		else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
+			r->age = 0;
 		else
 			r->age++;
 
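Besides the min_sz_region plumbing, the @@ -2231 hunk adds one behavioral change to region merging: a region's age now also restarts when it flips between idle and active, even if the raw nr_accesses delta stays within the merge threshold. A standalone sketch of the new predicate, not from the patch:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* mirrors the age-reset condition in damon_merge_regions_of() */
static bool age_resets(int nr_accesses, int last_nr_accesses, int thres)
{
	if (abs(nr_accesses - last_nr_accesses) > thres)
		return true;
	/* new: idle <-> active transitions reset the age too */
	return (nr_accesses == 0) != (last_nr_accesses == 0);
}

int main(void)
{
	/* 0 -> 1 is within thres == 2, but is an idle->active flip: resets */
	printf("%d\n", age_resets(1, 0, 2));	/* 1 */
	/* 2 -> 3 is within thres and stays active: age keeps growing */
	printf("%d\n", age_resets(3, 2, 2));	/* 0 */
	return 0;
}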
@@ -2306,7 +2340,8 @@ static void damon_split_region_at(struct damon_target *t,
 }
 
 /* Split every region in the given target into 'nr_subs' regions */
-static void damon_split_regions_of(struct damon_target *t, int nr_subs)
+static void damon_split_regions_of(struct damon_target *t, int nr_subs,
+		unsigned long min_sz_region)
 {
 	struct damon_region *r, *next;
 	unsigned long sz_region, sz_sub = 0;
@@ -2316,13 +2351,13 @@ static void damon_split_regions_of(struct damon_target *t, int nr_subs)
 		sz_region = damon_sz_region(r);
 
 		for (i = 0; i < nr_subs - 1 &&
-				sz_region > 2 * DAMON_MIN_REGION; i++) {
+				sz_region > 2 * min_sz_region; i++) {
 			/*
 			 * Randomly select size of left sub-region to be at
 			 * least 10 percent and at most 90% of original region
 			 */
 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
-					sz_region / 10, DAMON_MIN_REGION);
+					sz_region / 10, min_sz_region);
 			/* Do not allow blank region */
 			if (sz_sub == 0 || sz_sub >= sz_region)
 				continue;
@@ -2362,7 +2397,7 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
 		nr_subregions = 3;
 
 	damon_for_each_target(t, ctx)
-		damon_split_regions_of(t, nr_subregions);
+		damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);
 
 	last_nr_regions = nr_regions;
 }
@@ -2755,7 +2790,7 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
 
 	addr_range.start = *start;
 	addr_range.end = *end;
-	return damon_set_regions(t, &addr_range, 1);
+	return damon_set_regions(t, &addr_range, 1, DAMON_MIN_REGION);
 }
 
 /*
@@ -2828,6 +2863,16 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed,
 		r->nr_accesses++;
 }
 
+/**
+ * damon_initialized() - Return if DAMON is ready to be used.
+ *
+ * Return: true if DAMON is ready to be used, false otherwise.
+ */
+bool damon_initialized(void)
+{
+	return damon_region_cache != NULL;
+}
+
 static int __init damon_init(void)
 {
 	damon_region_cache = KMEM_CACHE(damon_region, 0);
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index b5a5ed16a7a5..42b9a656f9de 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -112,6 +112,13 @@ static unsigned long monitor_region_end __read_mostly;
 module_param(monitor_region_end, ulong, 0600);
 
 /*
+ * Scale factor for DAMON_LRU_SORT to ops address conversion.
+ *
+ * This parameter must not be set to 0.
+ */
+static unsigned long addr_unit __read_mostly = 1;
+
+/*
  * PID of the DAMON thread
  *
  * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
@@ -198,12 +205,21 @@ static int damon_lru_sort_apply_parameters(void)
 	if (err)
 		return err;
 
+	/*
+	 * If monitor_region_start/end are unset, always silently
+	 * reset addr_unit to 1.
+	 */
+	if (!monitor_region_start && !monitor_region_end)
+		addr_unit = 1;
+	param_ctx->addr_unit = addr_unit;
+	param_ctx->min_sz_region = max(DAMON_MIN_REGION / addr_unit, 1);
+
 	if (!damon_lru_sort_mon_attrs.sample_interval) {
 		err = -EINVAL;
 		goto out;
 	}
 
-	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+	err = damon_set_attrs(param_ctx, &damon_lru_sort_mon_attrs);
 	if (err)
 		goto out;
 
@@ -290,6 +306,30 @@ static int damon_lru_sort_turn(bool on)
 	return damon_call(ctx, &call_control);
 }
 
+static int damon_lru_sort_addr_unit_store(const char *val,
+		const struct kernel_param *kp)
+{
+	unsigned long input_addr_unit;
+	int err = kstrtoul(val, 0, &input_addr_unit);
+
+	if (err)
+		return err;
+	if (!input_addr_unit)
+		return -EINVAL;
+
+	addr_unit = input_addr_unit;
+	return 0;
+}
+
+static const struct kernel_param_ops addr_unit_param_ops = {
+	.set = damon_lru_sort_addr_unit_store,
+	.get = param_get_ulong,
+};
+
+module_param_cb(addr_unit, &addr_unit_param_ops, &addr_unit, 0600);
+MODULE_PARM_DESC(addr_unit,
+	"Scale factor for DAMON_LRU_SORT to ops address conversion (default: 1)");
+
 static int damon_lru_sort_enabled_store(const char *val,
 		const struct kernel_param *kp)
 {
@@ -305,7 +345,7 @@ static int damon_lru_sort_enabled_store(const char *val,
 		return 0;
 
 	/* Called before init function. The function will handle this. */
-	if (!ctx)
+	if (!damon_initialized())
 		goto set_param_out;
 
 	err = damon_lru_sort_turn(enable);
@@ -328,8 +368,13 @@ MODULE_PARM_DESC(enabled,
 
 static int __init damon_lru_sort_init(void)
 {
-	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);
+	int err;
 
+	if (!damon_initialized()) {
+		err = -ENOMEM;
+		goto out;
+	}
+	err = damon_modules_new_paddr_ctx_target(&ctx, &target);
 	if (err)
 		goto out;
 
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 99321ff5cb92..998c5180a603 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -303,7 +303,7 @@ static unsigned int __damon_migrate_folio_list(
 		 * instead of migrated.
 		 */
 		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
-			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
+			__GFP_NOMEMALLOC | GFP_NOWAIT,
 		.nid = target_nid,
 	};
 
@@ -412,3 +412,12 @@ unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid)
 
 	return nr_migrated;
 }
+
+bool damos_ops_has_filter(struct damos *s)
+{
+	struct damos_filter *f;
+
+	damos_for_each_ops_filter(f, s)
+		return true;
+	return false;
+}
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 61ad54aaf256..5efa5b5970de 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -21,3 +21,5 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
 bool damos_folio_filter_match(struct damos_filter *filter, struct folio *folio);
 
 unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid);
+
+bool damos_ops_has_filter(struct damos *s);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 53a55c5114fb..07a8aead439e 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -18,7 +18,26 @@
 #include "../internal.h"
 #include "ops-common.h"
 
-static void damon_pa_mkold(unsigned long paddr)
+static phys_addr_t damon_pa_phys_addr(
+		unsigned long addr, unsigned long addr_unit)
+{
+	return (phys_addr_t)addr * addr_unit;
+}
+
+static unsigned long damon_pa_core_addr(
+		phys_addr_t pa, unsigned long addr_unit)
+{
+	/*
+	 * Use div_u64() for avoiding linking errors related with __udivdi3,
+	 * __aeabi_uldivmod, or similar problems.  This should also improve the
+	 * performance optimization (read div_u64() comment for the detail).
+	 */
+	if (sizeof(pa) == 8 && sizeof(addr_unit) == 4)
+		return div_u64(pa, addr_unit);
+	return pa / addr_unit;
+}
+
+static void damon_pa_mkold(phys_addr_t paddr)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 
@@ -29,11 +48,12 @@ static void damon_pa_mkold(unsigned long paddr)
 	folio_put(folio);
 }
 
-static void __damon_pa_prepare_access_check(struct damon_region *r)
+static void __damon_pa_prepare_access_check(struct damon_region *r,
+		unsigned long addr_unit)
 {
 	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
 
-	damon_pa_mkold(r->sampling_addr);
+	damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, addr_unit));
 }
 
 static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
@@ -43,11 +63,11 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 
 	damon_for_each_target(t, ctx) {
 		damon_for_each_region(r, t)
-			__damon_pa_prepare_access_check(r);
+			__damon_pa_prepare_access_check(r, ctx->addr_unit);
 	}
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
+static bool damon_pa_young(phys_addr_t paddr, unsigned long *folio_sz)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	bool accessed;
@@ -62,23 +82,25 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 }
 
 static void __damon_pa_check_access(struct damon_region *r,
-		struct damon_attrs *attrs)
+		struct damon_attrs *attrs, unsigned long addr_unit)
 {
-	static unsigned long last_addr;
+	static phys_addr_t last_addr;
 	static unsigned long last_folio_sz = PAGE_SIZE;
 	static bool last_accessed;
+	phys_addr_t sampling_addr = damon_pa_phys_addr(
+			r->sampling_addr, addr_unit);
 
 	/* If the region is in the last checked page, reuse the result */
 	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
-				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
+				ALIGN_DOWN(sampling_addr, last_folio_sz)) {
 		damon_update_region_access_rate(r, last_accessed, attrs);
 		return;
 	}
 
-	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
+	last_accessed = damon_pa_young(sampling_addr, &last_folio_sz);
 	damon_update_region_access_rate(r, last_accessed, attrs);
 
-	last_addr = r->sampling_addr;
+	last_addr = sampling_addr;
 }
 
 static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
@@ -89,7 +111,8 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 
 	damon_for_each_target(t, ctx) {
 		damon_for_each_region(r, t) {
-			__damon_pa_check_access(r, &ctx->attrs);
+			__damon_pa_check_access(
+					r, &ctx->attrs, ctx->addr_unit);
 			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
 		}
 	}
@@ -125,10 +148,11 @@ static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
 	return false;
 }
 
-static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
+static unsigned long damon_pa_pageout(struct damon_region *r,
+		unsigned long addr_unit, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
-	unsigned long addr, applied;
+	phys_addr_t addr, applied;
 	LIST_HEAD(folio_list);
 	bool install_young_filter = true;
 	struct damos_filter *filter;
@@ -149,8 +173,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
 		damos_add_filter(s, filter);
 	}
 
-	addr = r->ar.start;
-	while (addr < r->ar.end) {
+	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
 		folio = damon_get_folio(PHYS_PFN(addr));
 		if (damon_pa_invalid_damos_folio(folio, s)) {
 			addr += PAGE_SIZE;
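The two helpers at the top of paddr.c define the scaling contract: core-space addresses are multiplied by addr_unit to obtain a phys_addr_t, and physical sizes are divided by addr_unit on the way back, with div_u64() so 32-bit builds avoid 64-by-32 division helpers from libgcc. A standalone userspace sketch of the same round trip, not from the patch (plain division stands in for div_u64(), and the numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t pa_of(unsigned long core_addr, unsigned long addr_unit)
{
	/* cast before multiplying so the product keeps its high bits */
	return (uint64_t)core_addr * addr_unit;
}

static unsigned long core_of(uint64_t pa, unsigned long addr_unit)
{
	/* the kernel uses div_u64() for this division on 32-bit builds */
	return (unsigned long)(pa / addr_unit);
}

int main(void)
{
	unsigned long addr_unit = 4;	/* 1 core byte == 4 physical bytes */
	uint64_t pa = pa_of(0xfffff000UL, addr_unit);

	/* a 32-bit core address space now covers up to 16 GiB of physmem */
	printf("pa=0x%llx core=0x%lx\n", (unsigned long long)pa,
			core_of(pa, addr_unit));
	return 0;
}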
@@ -160,7 +184,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
 		if (damos_pa_filter_out(s, folio))
 			goto put_folio;
 		else
-			*sz_filter_passed += folio_size(folio);
+			*sz_filter_passed += folio_size(folio) / addr_unit;
 
 		folio_clear_referenced(folio);
 		folio_test_clear_young(folio);
@@ -179,18 +203,19 @@ put_folio:
 	applied = reclaim_pages(&folio_list);
 	cond_resched();
 	s->last_applied = folio;
-	return applied * PAGE_SIZE;
+	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
 }
 
 static inline unsigned long damon_pa_mark_accessed_or_deactivate(
-		struct damon_region *r, struct damos *s, bool mark_accessed,
+		struct damon_region *r, unsigned long addr_unit,
+		struct damos *s, bool mark_accessed,
 		unsigned long *sz_filter_passed)
 {
-	unsigned long addr, applied = 0;
+	phys_addr_t addr, applied = 0;
 	struct folio *folio;
 
-	addr = r->ar.start;
-	while (addr < r->ar.end) {
+	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
 		folio = damon_get_folio(PHYS_PFN(addr));
 		if (damon_pa_invalid_damos_folio(folio, s)) {
 			addr += PAGE_SIZE;
@@ -200,7 +225,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 		if (damos_pa_filter_out(s, folio))
 			goto put_folio;
 		else
-			*sz_filter_passed += folio_size(folio);
+			*sz_filter_passed += folio_size(folio) / addr_unit;
 
 		if (mark_accessed)
 			folio_mark_accessed(folio);
@@ -212,32 +237,35 @@ put_folio:
 		folio_put(folio);
 	}
 	s->last_applied = folio;
-	return applied * PAGE_SIZE;
+	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
 }
 
 static unsigned long damon_pa_mark_accessed(struct damon_region *r,
-		struct damos *s, unsigned long *sz_filter_passed)
+		unsigned long addr_unit, struct damos *s,
+		unsigned long *sz_filter_passed)
 {
-	return damon_pa_mark_accessed_or_deactivate(r, s, true,
+	return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, true,
 			sz_filter_passed);
 }
 
 static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
-		struct damos *s, unsigned long *sz_filter_passed)
+		unsigned long addr_unit, struct damos *s,
+		unsigned long *sz_filter_passed)
 {
-	return damon_pa_mark_accessed_or_deactivate(r, s, false,
+	return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, false,
 			sz_filter_passed);
 }
 
-static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
+static unsigned long damon_pa_migrate(struct damon_region *r,
+		unsigned long addr_unit, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
-	unsigned long addr, applied;
+	phys_addr_t addr, applied;
 	LIST_HEAD(folio_list);
 	struct folio *folio;
 
-	addr = r->ar.start;
-	while (addr < r->ar.end) {
+	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
 		folio = damon_get_folio(PHYS_PFN(addr));
 		if (damon_pa_invalid_damos_folio(folio, s)) {
 			addr += PAGE_SIZE;
@@ -247,7 +275,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
 		if (damos_pa_filter_out(s, folio))
 			goto put_folio;
 		else
-			*sz_filter_passed += folio_size(folio);
+			*sz_filter_passed += folio_size(folio) / addr_unit;
 
 		if (!folio_isolate_lru(folio))
 			goto put_folio;
@@ -259,29 +287,21 @@ put_folio:
 	applied = damon_migrate_pages(&folio_list, s->target_nid);
 	cond_resched();
 	s->last_applied = folio;
-	return applied * PAGE_SIZE;
+	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
 }
 
-static bool damon_pa_scheme_has_filter(struct damos *s)
-{
-	struct damos_filter *f;
-
-	damos_for_each_ops_filter(f, s)
-		return true;
-	return false;
-}
-
-static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
+static unsigned long damon_pa_stat(struct damon_region *r,
+		unsigned long addr_unit, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 	struct folio *folio;
 
-	if (!damon_pa_scheme_has_filter(s))
+	if (!damos_ops_has_filter(s))
 		return 0;
 
-	addr = r->ar.start;
-	while (addr < r->ar.end) {
+	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
 		folio = damon_get_folio(PHYS_PFN(addr));
 		if (damon_pa_invalid_damos_folio(folio, s)) {
 			addr += PAGE_SIZE;
@@ -289,7 +309,7 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
 		}
 
 		if (!damos_pa_filter_out(s, folio))
-			*sz_filter_passed += folio_size(folio);
+			*sz_filter_passed += folio_size(folio) / addr_unit;
 		addr += folio_size(folio);
 		folio_put(folio);
 	}
@@ -301,18 +321,22 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
 		struct damon_target *t, struct damon_region *r,
 		struct damos *scheme, unsigned long *sz_filter_passed)
 {
+	unsigned long aunit = ctx->addr_unit;
+
 	switch (scheme->action) {
 	case DAMOS_PAGEOUT:
-		return damon_pa_pageout(r, scheme, sz_filter_passed);
+		return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
 	case DAMOS_LRU_PRIO:
-		return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
+		return damon_pa_mark_accessed(r, aunit, scheme,
+				sz_filter_passed);
 	case DAMOS_LRU_DEPRIO:
-		return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
+		return damon_pa_deactivate_pages(r, aunit, scheme,
+				sz_filter_passed);
 	case DAMOS_MIGRATE_HOT:
 	case DAMOS_MIGRATE_COLD:
-		return damon_pa_migrate(r, scheme, sz_filter_passed);
+		return damon_pa_migrate(r, aunit, scheme, sz_filter_passed);
 	case DAMOS_STAT:
-		return damon_pa_stat(r, scheme, sz_filter_passed);
+		return damon_pa_stat(r, aunit, scheme, sz_filter_passed);
 	default:
 		/* DAMOS actions that not yet supported by 'paddr'. */
 		break;
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index fb7c982a0018..7ba3d0f9a19a 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -129,6 +129,13 @@ static unsigned long monitor_region_end __read_mostly;
 module_param(monitor_region_end, ulong, 0600);
 
 /*
+ * Scale factor for DAMON_RECLAIM to ops address conversion.
+ *
+ * This parameter must not be set to 0.
+ */
+static unsigned long addr_unit __read_mostly = 1;
+
+/*
  * Skip anonymous pages reclamation.
  *
  * If this parameter is set as ``Y``, DAMON_RECLAIM does not reclaim anonymous
@@ -194,6 +201,15 @@ static int damon_reclaim_apply_parameters(void)
 	if (err)
 		return err;
 
+	/*
+	 * If monitor_region_start/end are unset, always silently
+	 * reset addr_unit to 1.
+	 */
+	if (!monitor_region_start && !monitor_region_end)
+		addr_unit = 1;
+	param_ctx->addr_unit = addr_unit;
+	param_ctx->min_sz_region = max(DAMON_MIN_REGION / addr_unit, 1);
+
 	if (!damon_reclaim_mon_attrs.aggr_interval) {
 		err = -EINVAL;
 		goto out;
 	}
 
@@ -294,6 +310,30 @@ static int damon_reclaim_turn(bool on)
 	return damon_call(ctx, &call_control);
 }
 
+static int damon_reclaim_addr_unit_store(const char *val,
+		const struct kernel_param *kp)
+{
+	unsigned long input_addr_unit;
+	int err = kstrtoul(val, 0, &input_addr_unit);
+
+	if (err)
+		return err;
+	if (!input_addr_unit)
+		return -EINVAL;
+
+	addr_unit = input_addr_unit;
+	return 0;
+}
+
+static const struct kernel_param_ops addr_unit_param_ops = {
+	.set = damon_reclaim_addr_unit_store,
+	.get = param_get_ulong,
+};
+
+module_param_cb(addr_unit, &addr_unit_param_ops, &addr_unit, 0600);
+MODULE_PARM_DESC(addr_unit,
+	"Scale factor for DAMON_RECLAIM to ops address conversion (default: 1)");
+
 static int damon_reclaim_enabled_store(const char *val,
 		const struct kernel_param *kp)
 {
@@ -309,7 +349,7 @@ static int damon_reclaim_enabled_store(const char *val,
 		return 0;
 
 	/* Called before init function. The function will handle this. */
-	if (!ctx)
+	if (!damon_initialized())
 		goto set_param_out;
 
 	err = damon_reclaim_turn(enable);
@@ -332,8 +372,13 @@ MODULE_PARM_DESC(enabled,
 
 static int __init damon_reclaim_init(void)
 {
-	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);
+	int err;
 
+	if (!damon_initialized()) {
+		err = -ENOMEM;
+		goto out;
+	}
+	err = damon_modules_new_paddr_ctx_target(&ctx, &target);
 	if (err)
 		goto out;
 
diff --git a/mm/damon/stat.c b/mm/damon/stat.c
index 87bcd8866d4b..d8010968bbed 100644
--- a/mm/damon/stat.c
+++ b/mm/damon/stat.c
@@ -34,11 +34,16 @@ module_param(estimated_memory_bandwidth, ulong, 0400);
 MODULE_PARM_DESC(estimated_memory_bandwidth,
 		"Estimated memory bandwidth usage in bytes per second");
 
-static unsigned long memory_idle_ms_percentiles[101] __read_mostly = {0,};
-module_param_array(memory_idle_ms_percentiles, ulong, NULL, 0400);
+static long memory_idle_ms_percentiles[101] __read_mostly = {0,};
+module_param_array(memory_idle_ms_percentiles, long, NULL, 0400);
 MODULE_PARM_DESC(memory_idle_ms_percentiles,
 		"Memory idle time percentiles in milliseconds");
 
+static unsigned long aggr_interval_us;
+module_param(aggr_interval_us, ulong, 0400);
+MODULE_PARM_DESC(aggr_interval_us,
+		"Current tuned aggregation interval in microseconds");
+
 static struct damon_ctx *damon_stat_context;
 
 static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
@@ -56,10 +61,10 @@ static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
 			MSEC_PER_SEC / c->attrs.aggr_interval;
 }
 
-static unsigned int damon_stat_idletime(const struct damon_region *r)
+static int damon_stat_idletime(const struct damon_region *r)
 {
 	if (r->nr_accesses)
-		return 0;
+		return -1 * (r->age + 1);
 	return r->age + 1;
 }
 
@@ -117,7 +122,7 @@ static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
 		while (next_percentile <= accounted_bytes * 100 / total_sz)
 			memory_idle_ms_percentiles[next_percentile++] =
 				damon_stat_idletime(region) *
-				c->attrs.aggr_interval / USEC_PER_MSEC;
+				(long)c->attrs.aggr_interval / USEC_PER_MSEC;
 	}
 	kfree(sorted_regions);
 }
@@ -133,6 +138,7 @@ static int damon_stat_damon_call_fn(void *data)
 		return 0;
 	last_refresh_jiffies = jiffies;
 
+	aggr_interval_us = c->attrs.aggr_interval;
 	damon_stat_set_estimated_memory_bandwidth(c);
 	damon_stat_set_idletime_percentiles(c);
 	return 0;
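The stat.c hunks above flip the percentile metric to a signed convention: damon_stat_idletime() now returns a positive number of aggregation intervals for idle regions and a negative one for actively accessed regions, so memory_idle_ms_percentiles can express "actively accessed for N ms" as a negative entry. A standalone sketch of the convention, not from the patch (the interval value is made up):

#include <stdio.h>

struct region { unsigned int nr_accesses; unsigned int age; };

/* mirrors the new signed damon_stat_idletime() */
static int idletime(const struct region *r)
{
	if (r->nr_accesses)
		return -1 * (int)(r->age + 1);
	return (int)(r->age + 1);
}

int main(void)
{
	struct region idle = { .nr_accesses = 0, .age = 2 };
	struct region hot = { .nr_accesses = 5, .age = 2 };
	long aggr_interval_us = 100000;	/* 100 ms, illustrative */

	/* idle for 3 aggregations -> 300 ms; accessed for 3 -> -300 ms */
	printf("%ld ms\n", idletime(&idle) * aggr_interval_us / 1000);
	printf("%ld ms\n", idletime(&hot) * aggr_interval_us / 1000);
	return 0;
}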
@@ -214,8 +220,6 @@ static void damon_stat_stop(void)
 	damon_destroy_ctx(damon_stat_context);
 }
 
-static bool damon_stat_init_called;
-
 static int damon_stat_enabled_store(
 		const char *val, const struct kernel_param *kp)
 {
@@ -229,7 +233,7 @@ static int damon_stat_enabled_store(
 	if (is_enabled == enabled)
 		return 0;
 
-	if (!damon_stat_init_called)
+	if (!damon_initialized())
 		/*
 		 * probably called from command line parsing (parse_args()).
 		 * Cannot call damon_new_ctx().  Let damon_stat_init() handle.
@@ -250,12 +254,16 @@ static int __init damon_stat_init(void)
 {
 	int err = 0;
 
-	damon_stat_init_called = true;
+	if (!damon_initialized()) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	/* probably set via command line */
 	if (enabled)
 		err = damon_stat_start();
 
+out:
 	if (err && enabled)
 		enabled = false;
 	return err;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index c96c2154128f..2fc722f998f8 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -834,6 +834,7 @@ static const struct damon_sysfs_ops_name damon_sysfs_ops_names[] = {
 struct damon_sysfs_context {
 	struct kobject kobj;
 	enum damon_ops_id ops_id;
+	unsigned long addr_unit;
 	struct damon_sysfs_attrs *attrs;
 	struct damon_sysfs_targets *targets;
 	struct damon_sysfs_schemes *schemes;
@@ -849,6 +850,7 @@ static struct damon_sysfs_context *damon_sysfs_context_alloc(
 		return NULL;
 	context->kobj = (struct kobject){};
 	context->ops_id = ops_id;
+	context->addr_unit = 1;
 	return context;
 }
 
@@ -997,6 +999,32 @@ static ssize_t operations_store(struct kobject *kobj,
 	return -EINVAL;
 }
 
+static ssize_t addr_unit_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct damon_sysfs_context *context = container_of(kobj,
+			struct damon_sysfs_context, kobj);
+
+	return sysfs_emit(buf, "%lu\n", context->addr_unit);
+}
+
+static ssize_t addr_unit_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct damon_sysfs_context *context = container_of(kobj,
+			struct damon_sysfs_context, kobj);
+	unsigned long input_addr_unit;
+	int err = kstrtoul(buf, 0, &input_addr_unit);
+
+	if (err)
+		return err;
+	if (!input_addr_unit)
+		return -EINVAL;
+
+	context->addr_unit = input_addr_unit;
+	return count;
+}
+
 static void damon_sysfs_context_release(struct kobject *kobj)
 {
 	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
@@ -1008,9 +1036,13 @@ static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
 static struct kobj_attribute damon_sysfs_context_operations_attr =
 		__ATTR_RW_MODE(operations, 0600);
 
+static struct kobj_attribute damon_sysfs_context_addr_unit_attr =
+		__ATTR_RW_MODE(addr_unit, 0600);
+
 static struct attribute *damon_sysfs_context_attrs[] = {
 	&damon_sysfs_context_avail_operations_attr.attr,
 	&damon_sysfs_context_operations_attr.attr,
+	&damon_sysfs_context_addr_unit_attr.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(damon_sysfs_context);
@@ -1301,7 +1333,8 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
 }
 
 static int damon_sysfs_set_regions(struct damon_target *t,
-		struct damon_sysfs_regions *sysfs_regions)
+		struct damon_sysfs_regions *sysfs_regions,
+		unsigned long min_sz_region)
 {
 	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
 			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
@@ -1323,7 +1356,7 @@ static int damon_sysfs_set_regions(struct damon_target *t,
 		if (ranges[i - 1].end > ranges[i].start)
 			goto out;
 	}
-	err = damon_set_regions(t, ranges, sysfs_regions->nr);
+	err = damon_set_regions(t, ranges, sysfs_regions->nr, min_sz_region);
 out:
 	kfree(ranges);
 	return err;
@@ -1344,7 +1377,7 @@ static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
 		/* caller will destroy targets */
 		return -EINVAL;
 	}
-	return damon_sysfs_set_regions(t, sys_target->regions);
+	return damon_sysfs_set_regions(t, sys_target->regions, ctx->min_sz_region);
 }
 
 static int damon_sysfs_add_targets(struct damon_ctx *ctx,
@@ -1401,6 +1434,11 @@ static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
 	err = damon_select_ops(ctx, sys_ctx->ops_id);
 	if (err)
 		return err;
+	ctx->addr_unit = sys_ctx->addr_unit;
+	/* addr_unit is respected by only DAMON_OPS_PADDR */
+	if (sys_ctx->ops_id == DAMON_OPS_PADDR)
+		ctx->min_sz_region = max(
+				DAMON_MIN_REGION / sys_ctx->addr_unit, 1);
 	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
 	if (err)
 		return err;
@@ -1592,12 +1630,14 @@ static int damon_sysfs_damon_call(int (*fn)(void *data),
 		struct damon_sysfs_kdamond *kdamond)
 {
 	struct damon_call_control call_control = {};
+	int err;
 
 	if (!kdamond->damon_ctx)
 		return -EINVAL;
 	call_control.fn = fn;
 	call_control.data = kdamond;
-	return damon_call(kdamond->damon_ctx, &call_control);
+	err = damon_call(kdamond->damon_ctx, &call_control);
+	return err ? err : call_control.return_code;
 }
 
 struct damon_sysfs_schemes_walk_data {
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index dfedfff19940..51369e35298b 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -230,14 +230,14 @@ static void damon_test_split_regions_of(struct kunit *test)
 	t = damon_new_target();
 	r = damon_new_region(0, 22);
 	damon_add_region(r, t);
-	damon_split_regions_of(t, 2);
+	damon_split_regions_of(t, 2, DAMON_MIN_REGION);
 	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
 	damon_free_target(t);
 
 	t = damon_new_target();
 	r = damon_new_region(0, 220);
 	damon_add_region(r, t);
-	damon_split_regions_of(t, 4);
+	damon_split_regions_of(t, 4, DAMON_MIN_REGION);
 	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
 	damon_free_target(t);
 	damon_destroy_ctx(c);
@@ -303,7 +303,7 @@ static void damon_test_set_regions(struct kunit *test)
 
 	damon_add_region(r1, t);
 	damon_add_region(r2, t);
-	damon_set_regions(t, &range, 1);
+	damon_set_regions(t, &range, 1, DAMON_MIN_REGION);
 
 	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
 	damon_for_each_region(r, t) {
@@ -419,6 +419,22 @@ static void damos_test_new_filter(struct kunit *test)
 	damos_destroy_filter(filter);
 }
 
+static void damos_test_commit_filter(struct kunit *test)
+{
+	struct damos_filter *src_filter = damos_new_filter(
+			DAMOS_FILTER_TYPE_ANON, true, true);
+	struct damos_filter *dst_filter = damos_new_filter(
+			DAMOS_FILTER_TYPE_ACTIVE, false, false);
+
+	damos_commit_filter(dst_filter, src_filter);
+	KUNIT_EXPECT_EQ(test, dst_filter->type, src_filter->type);
+	KUNIT_EXPECT_EQ(test, dst_filter->matching, src_filter->matching);
+	KUNIT_EXPECT_EQ(test, dst_filter->allow, src_filter->allow);
+
+	damos_destroy_filter(src_filter);
+	damos_destroy_filter(dst_filter);
+}
+
 static void damos_test_filter_out(struct kunit *test)
 {
 	struct damon_target *t;
@@ -434,25 +450,29 @@ static void damos_test_filter_out(struct kunit *test)
 	damon_add_region(r, t);
 
 	/* region in the range */
-	KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
+	KUNIT_EXPECT_TRUE(test,
+			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
 	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
 
 	/* region before the range */
 	r->ar.start = DAMON_MIN_REGION * 1;
 	r->ar.end = DAMON_MIN_REGION * 2;
-	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
+	KUNIT_EXPECT_FALSE(test,
+			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
 	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
 
 	/* region after the range */
 	r->ar.start = DAMON_MIN_REGION * 6;
 	r->ar.end = DAMON_MIN_REGION * 8;
-	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
+	KUNIT_EXPECT_FALSE(test,
+			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
 	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
 
 	/* region started before the range */
 	r->ar.start = DAMON_MIN_REGION * 1;
 	r->ar.end = DAMON_MIN_REGION * 4;
-	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
+	KUNIT_EXPECT_FALSE(test,
+			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
 	/* filter should have split the region */
 	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
 	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
@@ -465,7 +485,8 @@ static void damos_test_filter_out(struct kunit *test)
 	/* region started in the range */
 	r->ar.start = DAMON_MIN_REGION * 2;
 	r->ar.end = DAMON_MIN_REGION * 8;
-	KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
+	KUNIT_EXPECT_TRUE(test,
+			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
 	/* filter should have split the region */
 	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
 	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
@@ -594,6 +615,7 @@ static struct kunit_case damon_test_cases[] = {
 	KUNIT_CASE(damon_test_set_attrs),
 	KUNIT_CASE(damon_test_moving_sum),
 	KUNIT_CASE(damos_test_new_filter),
+	KUNIT_CASE(damos_test_commit_filter),
 	KUNIT_CASE(damos_test_filter_out),
 	KUNIT_CASE(damon_test_feed_loop_next_input),
 	KUNIT_CASE(damon_test_set_filters_default_reject),
diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h
index d2b37ccf2cc0..fce38dd53cf8 100644
--- a/mm/damon/tests/vaddr-kunit.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -141,7 +141,7 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		damon_add_region(r, t);
 	}
 
-	damon_set_regions(t, three_regions, 3);
+	damon_set_regions(t, three_regions, 3, DAMON_MIN_REGION);
 
 	for (i = 0; i < nr_expected / 2; i++) {
 		r = __nth_region_of(t, i);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 87e825349bdf..8c048f9b129e 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -299,7 +299,7 @@ static void damon_va_update(struct damon_ctx *ctx)
 	damon_for_each_target(t, ctx) {
 		if (damon_va_three_regions(t, three_regions))
 			continue;
-		damon_set_regions(t, three_regions, 3);
+		damon_set_regions(t, three_regions, 3, DAMON_MIN_REGION);
 	}
 }
 
@@ -890,6 +890,107 @@ free_lists:
 	return applied * PAGE_SIZE;
 }
 
+struct damos_va_stat_private {
+	struct damos *scheme;
+	unsigned long *sz_filter_passed;
+};
+
+static inline bool damos_va_invalid_folio(struct folio *folio,
+		struct damos *s)
+{
+	return !folio || folio == s->last_applied;
+}
+
+static int damos_va_stat_pmd_entry(pmd_t *pmd, unsigned long addr,
+		unsigned long next, struct mm_walk *walk)
+{
+	struct damos_va_stat_private *priv = walk->private;
+	struct damos *s = priv->scheme;
+	unsigned long *sz_filter_passed = priv->sz_filter_passed;
+	struct vm_area_struct *vma = walk->vma;
+	struct folio *folio;
+	spinlock_t *ptl;
+	pte_t *start_pte, *pte, ptent;
+	int nr;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (pmd_trans_huge(*pmd)) {
+		pmd_t pmde;
+
+		ptl = pmd_trans_huge_lock(pmd, vma);
+		if (!ptl)
+			return 0;
+		pmde = pmdp_get(pmd);
+		if (!pmd_present(pmde))
+			goto huge_unlock;
+
+		folio = vm_normal_folio_pmd(vma, addr, pmde);
+
+		if (damos_va_invalid_folio(folio, s))
+			goto huge_unlock;
+
+		if (!damos_va_filter_out(s, folio, vma, addr, NULL, pmd))
+			*sz_filter_passed += folio_size(folio);
+		s->last_applied = folio;
+
+huge_unlock:
+		spin_unlock(ptl);
+		return 0;
+	}
+#endif
+	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!start_pte)
+		return 0;
+
+	for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
+		nr = 1;
+		ptent = ptep_get(pte);
+
+		if (pte_none(ptent) || !pte_present(ptent))
+			continue;
+
+		folio = vm_normal_folio(vma, addr, ptent);
+
+		if (damos_va_invalid_folio(folio, s))
+			continue;
+
+		if (!damos_va_filter_out(s, folio, vma, addr, pte, NULL))
+			*sz_filter_passed += folio_size(folio);
+		nr = folio_nr_pages(folio);
+		s->last_applied = folio;
+	}
+	pte_unmap_unlock(start_pte, ptl);
+	return 0;
+}
+
+static unsigned long damos_va_stat(struct damon_target *target,
+		struct damon_region *r, struct damos *s,
+		unsigned long *sz_filter_passed)
+{
+	struct damos_va_stat_private priv;
+	struct mm_struct *mm;
+	struct mm_walk_ops walk_ops = {
+		.pmd_entry = damos_va_stat_pmd_entry,
+		.walk_lock = PGWALK_RDLOCK,
+	};
+
+	priv.scheme = s;
+	priv.sz_filter_passed = sz_filter_passed;
+
+	if (!damos_ops_has_filter(s))
+		return 0;
+
+	mm = damon_get_mm(target);
+	if (!mm)
+		return 0;
+
+	mmap_read_lock(mm);
+	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
+	mmap_read_unlock(mm);
+	mmput(mm);
+	return 0;
+}
+
 static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
 		struct damon_target *t, struct damon_region *r,
 		struct damos *scheme, unsigned long *sz_filter_passed)
@@ -916,7 +1017,7 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
 	case DAMOS_MIGRATE_COLD:
 		return damos_va_migrate(t, r, scheme, sz_filter_passed);
 	case DAMOS_STAT:
-		return 0;
+		return damos_va_stat(t, r, scheme, sz_filter_passed);
 	default:
 		/*
 		 * DAMOS actions that are not yet supported by 'vaddr'.
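End-to-end, a user who wants a paddr context to monitor physical memory beyond the reach of a 32-bit unsigned long sets the new per-context addr_unit file before starting the kdamond. A userspace sketch, not from the patch, assuming the usual DAMON sysfs layout and kdamond/context index 0 (adjust the path for your setup):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/addr_unit";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* interpret this context's addresses in units of 4 bytes */
	fprintf(f, "4\n");
	return fclose(f) ? 1 : 0;
}

For the DAMON_RECLAIM and DAMON_LRU_SORT modules, the equivalent knob is the addr_unit module parameter added above, which rejects 0 and is silently reset to 1 whenever monitor_region_start/end are unset.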