Diffstat (limited to 'mm/damon')
-rw-r--r--  mm/damon/Kconfig                                                   |   37
-rw-r--r--  mm/damon/Makefile                                                  |    1
-rw-r--r--  mm/damon/core.c                                                    | 1063
-rw-r--r--  mm/damon/dbgfs-test.h                                              |  163
-rw-r--r--  mm/damon/dbgfs.c                                                   | 1148
-rw-r--r--  mm/damon/lru_sort.c                                                |   56
-rw-r--r--  mm/damon/modules-common.c                                          |    2
-rw-r--r--  mm/damon/modules-common.h                                          |    2
-rw-r--r--  mm/damon/ops-common.c                                              |   27
-rw-r--r--  mm/damon/ops-common.h                                              |    2
-rw-r--r--  mm/damon/paddr.c                                                   |  385
-rw-r--r--  mm/damon/reclaim.c                                                 |   67
-rw-r--r--  mm/damon/sysfs-common.c                                            |    2
-rw-r--r--  mm/damon/sysfs-common.h                                            |   20
-rw-r--r--  mm/damon/sysfs-schemes.c                                           |  616
-rw-r--r--  mm/damon/sysfs.c                                                   |  622
-rw-r--r--  mm/damon/tests/.kunitconfig                                        |   15
-rw-r--r--  mm/damon/tests/core-kunit.h (renamed from mm/damon/core-test.h)    |  125
-rw-r--r--  mm/damon/tests/sysfs-kunit.h (renamed from mm/damon/sysfs-test.h)  |   11
-rw-r--r--  mm/damon/tests/vaddr-kunit.h (renamed from mm/damon/vaddr-test.h)  |    8
-rw-r--r--  mm/damon/vaddr.c                                                   |   24
21 files changed, 2226 insertions, 2170 deletions
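
Note (not part of the patch): the mm/damon/core.c changes below add damon_call(), which asks the kdamond worker thread to run a caller-supplied function between sampling iterations, and an analogous damos_walk() for per-region DAMOS walks. The following is a minimal, illustrative sketch of how a caller might use damon_call(). It assumes only what the diff itself shows -- the damon_call(ctx, control) signature and the fn/data/return_code fields of struct damon_call_control; the callback and wrapper function names here are hypothetical.

	/* Hypothetical callback; runs on the kdamond thread, so it can
	 * read ctx internals without extra locking (see the damon_call()
	 * kernel-doc in the diff below). */
	static int dump_nr_schemes(void *data)
	{
		struct damon_ctx *ctx = data;
		struct damos *s;
		int nr = 0;

		damon_for_each_scheme(s, ctx)
			nr++;
		return nr;
	}

	/* Hypothetical caller running outside the kdamond. */
	static int example_call(struct damon_ctx *ctx)
	{
		struct damon_call_control control = {
			.fn = dump_nr_schemes,
			.data = ctx,
		};
		int err;

		/* Blocks until the kdamond has executed the callback. */
		err = damon_call(ctx, &control);
		if (err)
			return err;	/* -EBUSY, -EINVAL or -ECANCELED */
		pr_info("nr_schemes: %d\n", control.return_code);
		return 0;
	}
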
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig index fecb8172410c..c93d0c56b963 100644 --- a/mm/damon/Kconfig +++ b/mm/damon/Kconfig @@ -9,7 +9,7 @@ config DAMON access frequency of each memory region. The information can be useful for performance-centric DRAM level memory management. - See https://damonitor.github.io/doc/html/latest-damon/index.html for + See https://www.kernel.org/doc/html/latest/mm/damon/index.html for more information. config DAMON_KUNIT_TEST @@ -28,6 +28,7 @@ config DAMON_VADDR bool "Data access monitoring operations for virtual address spaces" depends on DAMON && MMU select PAGE_IDLE_FLAG + default DAMON help This builds the default data access monitoring operations for DAMON that work for virtual address spaces. @@ -36,6 +37,7 @@ config DAMON_PADDR bool "Data access monitoring operations for the physical address space" depends on DAMON && MMU select PAGE_IDLE_FLAG + default DAMON help This builds the default data access monitoring operations for DAMON that works for the physical address space. @@ -55,12 +57,13 @@ config DAMON_VADDR_KUNIT_TEST config DAMON_SYSFS bool "DAMON sysfs interface" depends on DAMON && SYSFS + default DAMON help This builds the sysfs interface for DAMON. The user space can use the interface for arbitrary data access monitoring. config DAMON_SYSFS_KUNIT_TEST - bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS + bool "Test for damon sysfs interface" if !KUNIT_ALL_TESTS depends on DAMON_SYSFS && KUNIT=y default KUNIT_ALL_TESTS help @@ -71,36 +74,6 @@ config DAMON_SYSFS_KUNIT_TEST If unsure, say N. -config DAMON_DBGFS_DEPRECATED - bool "DAMON debugfs interface (DEPRECATED!)" - depends on DAMON_VADDR && DAMON_PADDR && DEBUG_FS - help - This builds the debugfs interface for DAMON. The user space admins - can use the interface for arbitrary data access monitoring. - - If unsure, say N. - - This is deprecated, so users should move to the sysfs interface - (DAMON_SYSFS). If you depend on this and cannot move, please report - your usecase to damon@lists.linux.dev and linux-mm@kvack.org. - -config DAMON_DBGFS - bool - default y - depends on DAMON_DBGFS_DEPRECATED - -config DAMON_DBGFS_KUNIT_TEST - bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS - depends on DAMON_DBGFS && KUNIT=y - default KUNIT_ALL_TESTS - help - This builds the DAMON debugfs interface Kunit test suite. - - For more information on KUnit and unit tests in general, please refer - to the KUnit documentation. - - If unsure, say N. 
- config DAMON_RECLAIM bool "Build DAMON-based reclaim (DAMON_RECLAIM)" depends on DAMON_PADDR diff --git a/mm/damon/Makefile b/mm/damon/Makefile index f7add3f4aa79..8b49012ba8c3 100644 --- a/mm/damon/Makefile +++ b/mm/damon/Makefile @@ -4,6 +4,5 @@ obj-y := core.o obj-$(CONFIG_DAMON_VADDR) += ops-common.o vaddr.o obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o obj-$(CONFIG_DAMON_SYSFS) += sysfs-common.o sysfs-schemes.o sysfs.o -obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o obj-$(CONFIG_DAMON_RECLAIM) += modules-common.o reclaim.o obj-$(CONFIG_DAMON_LRU_SORT) += modules-common.o lru_sort.o diff --git a/mm/damon/core.c b/mm/damon/core.c index 6d503c1c125e..339116ea30e3 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -14,6 +14,7 @@ #include <linux/psi.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/string_choices.h> #define CREATE_TRACE_POINTS #include <trace/events/damon.h> @@ -75,14 +76,13 @@ int damon_register_ops(struct damon_operations *ops) if (ops->id >= NR_DAMON_OPS) return -EINVAL; + mutex_lock(&damon_ops_lock); /* Fail for already registered ops */ - if (__damon_is_registered_ops(ops->id)) { + if (__damon_is_registered_ops(ops->id)) err = -EINVAL; - goto out; - } - damon_registered_ops[ops->id] = *ops; -out: + else + damon_registered_ops[ops->id] = *ops; mutex_unlock(&damon_ops_lock); return err; } @@ -266,7 +266,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, } struct damos_filter *damos_new_filter(enum damos_filter_type type, - bool matching) + bool matching, bool allow) { struct damos_filter *filter; @@ -275,13 +275,36 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, return NULL; filter->type = type; filter->matching = matching; + filter->allow = allow; INIT_LIST_HEAD(&filter->list); return filter; } +/** + * damos_filter_for_ops() - Return if the filter is ops-hndled one. + * @type: type of the filter. + * + * Return: true if the filter of @type needs to be handled by ops layer, false + * otherwise. + */ +bool damos_filter_for_ops(enum damos_filter_type type) +{ + switch (type) { + case DAMOS_FILTER_TYPE_ADDR: + case DAMOS_FILTER_TYPE_TARGET: + return false; + default: + break; + } + return true; +} + void damos_add_filter(struct damos *s, struct damos_filter *f) { - list_add_tail(&f->list, &s->filters); + if (damos_filter_for_ops(f->type)) + list_add_tail(&f->list, &s->ops_filters); + else + list_add_tail(&f->list, &s->filters); } static void damos_del_filter(struct damos_filter *f) @@ -346,6 +369,7 @@ static struct damos_quota *damos_quota_init(struct damos_quota *quota) quota->charged_from = 0; quota->charge_target_from = NULL; quota->charge_addr_from = 0; + quota->esz_bp = 0; return quota; } @@ -353,7 +377,8 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, enum damos_action action, unsigned long apply_interval_us, struct damos_quota *quota, - struct damos_watermarks *wmarks) + struct damos_watermarks *wmarks, + int target_nid) { struct damos *scheme; @@ -369,7 +394,9 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, * or damon_attrs are updated. 
*/ scheme->next_apply_sis = 0; + scheme->walk_completed = false; INIT_LIST_HEAD(&scheme->filters); + INIT_LIST_HEAD(&scheme->ops_filters); scheme->stat = (struct damos_stat){}; INIT_LIST_HEAD(&scheme->list); @@ -380,6 +407,8 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, scheme->wmarks = *wmarks; scheme->wmarks.activated = true; + scheme->target_nid = target_nid; + return scheme; } @@ -495,11 +524,13 @@ struct damon_ctx *damon_new_ctx(void) ctx->attrs.ops_update_interval = 60 * 1000 * 1000; ctx->passed_sample_intervals = 0; - /* These will be set from kdamond_init_intervals_sis() */ + /* These will be set from kdamond_init_ctx() */ ctx->next_aggregation_sis = 0; ctx->next_ops_update_sis = 0; mutex_init(&ctx->kdamond_lock); + mutex_init(&ctx->call_control_lock); + mutex_init(&ctx->walk_control_lock); ctx->attrs.min_nr_regions = 10; ctx->attrs.max_nr_regions = 1000; @@ -548,7 +579,13 @@ static unsigned int damon_accesses_bp_to_nr_accesses( return accesses_bp * damon_max_nr_accesses(attrs) / 10000; } -/* convert nr_accesses to access ratio in bp (per 10,000) */ +/* + * Convert nr_accesses to access ratio in bp (per 10,000). + * + * Callers should ensure attrs.aggr_interval is not zero, like + * damon_update_monitoring_results() does . Otherwise, divide-by-zero would + * happen. + */ static unsigned int damon_nr_accesses_to_accesses_bp( unsigned int nr_accesses, struct damon_attrs *attrs) { @@ -565,11 +602,25 @@ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses, } static void damon_update_monitoring_result(struct damon_region *r, - struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) -{ - r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses, - old_attrs, new_attrs); - r->nr_accesses_bp = r->nr_accesses * 10000; + struct damon_attrs *old_attrs, struct damon_attrs *new_attrs, + bool aggregating) +{ + if (!aggregating) { + r->nr_accesses = damon_nr_accesses_for_new_attrs( + r->nr_accesses, old_attrs, new_attrs); + r->nr_accesses_bp = r->nr_accesses * 10000; + } else { + /* + * if this is called in the middle of the aggregation, reset + * the aggregations we made so far for this aggregation + * interval. In other words, make the status like + * kdamond_reset_aggregated() is called. + */ + r->last_nr_accesses = damon_nr_accesses_for_new_attrs( + r->last_nr_accesses, old_attrs, new_attrs); + r->nr_accesses_bp = r->last_nr_accesses * 10000; + r->nr_accesses = 0; + } r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs); } @@ -582,7 +633,7 @@ static void damon_update_monitoring_result(struct damon_region *r, * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs. */ static void damon_update_monitoring_results(struct damon_ctx *ctx, - struct damon_attrs *new_attrs) + struct damon_attrs *new_attrs, bool aggregating) { struct damon_attrs *old_attrs = &ctx->attrs; struct damon_target *t; @@ -597,7 +648,26 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx, damon_for_each_target(t, ctx) damon_for_each_region(r, t) damon_update_monitoring_result( - r, old_attrs, new_attrs); + r, old_attrs, new_attrs, aggregating); +} + +/* + * damon_valid_intervals_goal() - return if the intervals goal of @attrs is + * valid. 
+ */ +static bool damon_valid_intervals_goal(struct damon_attrs *attrs) +{ + struct damon_intervals_goal *goal = &attrs->intervals_goal; + + /* tuning is disabled */ + if (!goal->aggrs) + return true; + if (goal->min_sample_us > goal->max_sample_us) + return false; + if (attrs->sample_interval < goal->min_sample_us || + goal->max_sample_us < attrs->sample_interval) + return false; + return true; } /** @@ -605,10 +675,10 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx, * @ctx: monitoring context * @attrs: monitoring attributes * - * This function should be called while the kdamond is not running, or an - * access check results aggregation is not ongoing (e.g., from - * &struct damon_callback->after_aggregation or - * &struct damon_callback->after_wmarks_check callbacks). + * This function should be called while the kdamond is not running, an access + * check results aggregation is not ongoing (e.g., from &struct + * damon_callback->after_aggregation or &struct + * damon_callback->after_wmarks_check callbacks), or from damon_call(). * * Every time interval is in micro-seconds. * @@ -619,6 +689,11 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) unsigned long sample_interval = attrs->sample_interval ? attrs->sample_interval : 1; struct damos *s; + bool aggregating = ctx->passed_sample_intervals < + ctx->next_aggregation_sis; + + if (!damon_valid_intervals_goal(attrs)) + return -EINVAL; if (attrs->min_nr_regions < 3) return -EINVAL; @@ -627,12 +702,16 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) if (attrs->sample_interval > attrs->aggr_interval) return -EINVAL; + /* calls from core-external doesn't set this. */ + if (!attrs->aggr_samples) + attrs->aggr_samples = attrs->aggr_interval / sample_interval; + ctx->next_aggregation_sis = ctx->passed_sample_intervals + attrs->aggr_interval / sample_interval; ctx->next_ops_update_sis = ctx->passed_sample_intervals + attrs->ops_update_interval / sample_interval; - damon_update_monitoring_results(ctx, attrs); + damon_update_monitoring_results(ctx, attrs, aggregating); ctx->attrs = *attrs; damon_for_each_scheme(s, ctx) @@ -662,6 +741,442 @@ void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, damon_add_scheme(ctx, schemes[i]); } +static struct damos_quota_goal *damos_nth_quota_goal( + int n, struct damos_quota *q) +{ + struct damos_quota_goal *goal; + int i = 0; + + damos_for_each_quota_goal(goal, q) { + if (i++ == n) + return goal; + } + return NULL; +} + +static void damos_commit_quota_goal_union( + struct damos_quota_goal *dst, struct damos_quota_goal *src) +{ + switch (dst->metric) { + case DAMOS_QUOTA_NODE_MEM_USED_BP: + case DAMOS_QUOTA_NODE_MEM_FREE_BP: + dst->nid = src->nid; + break; + default: + break; + } +} + +static void damos_commit_quota_goal( + struct damos_quota_goal *dst, struct damos_quota_goal *src) +{ + dst->metric = src->metric; + dst->target_value = src->target_value; + if (dst->metric == DAMOS_QUOTA_USER_INPUT) + dst->current_value = src->current_value; + /* keep last_psi_total as is, since it will be updated in next cycle */ + damos_commit_quota_goal_union(dst, src); +} + +/** + * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota. + * @dst: The commit destination DAMOS quota. + * @src: The commit source DAMOS quota. + * + * Copies user-specified parameters for quota goals from @src to @dst. 
Users + * should use this function for quota goals-level parameters update of running + * DAMON contexts, instead of manual in-place updates. + * + * This function should be called from parameters-update safe context, like + * DAMON callbacks. + */ +int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src) +{ + struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal; + int i = 0, j = 0; + + damos_for_each_quota_goal_safe(dst_goal, next, dst) { + src_goal = damos_nth_quota_goal(i++, src); + if (src_goal) + damos_commit_quota_goal(dst_goal, src_goal); + else + damos_destroy_quota_goal(dst_goal); + } + damos_for_each_quota_goal_safe(src_goal, next, src) { + if (j++ < i) + continue; + new_goal = damos_new_quota_goal( + src_goal->metric, src_goal->target_value); + if (!new_goal) + return -ENOMEM; + damos_commit_quota_goal_union(new_goal, src_goal); + damos_add_quota_goal(dst, new_goal); + } + return 0; +} + +static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src) +{ + int err; + + dst->reset_interval = src->reset_interval; + dst->ms = src->ms; + dst->sz = src->sz; + err = damos_commit_quota_goals(dst, src); + if (err) + return err; + dst->weight_sz = src->weight_sz; + dst->weight_nr_accesses = src->weight_nr_accesses; + dst->weight_age = src->weight_age; + return 0; +} + +static struct damos_filter *damos_nth_filter(int n, struct damos *s) +{ + struct damos_filter *filter; + int i = 0; + + damos_for_each_filter(filter, s) { + if (i++ == n) + return filter; + } + return NULL; +} + +static void damos_commit_filter_arg( + struct damos_filter *dst, struct damos_filter *src) +{ + switch (dst->type) { + case DAMOS_FILTER_TYPE_MEMCG: + dst->memcg_id = src->memcg_id; + break; + case DAMOS_FILTER_TYPE_ADDR: + dst->addr_range = src->addr_range; + break; + case DAMOS_FILTER_TYPE_TARGET: + dst->target_idx = src->target_idx; + break; + case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: + dst->sz_range = src->sz_range; + break; + default: + break; + } +} + +static void damos_commit_filter( + struct damos_filter *dst, struct damos_filter *src) +{ + dst->type = src->type; + dst->matching = src->matching; + damos_commit_filter_arg(dst, src); +} + +static int damos_commit_core_filters(struct damos *dst, struct damos *src) +{ + struct damos_filter *dst_filter, *next, *src_filter, *new_filter; + int i = 0, j = 0; + + damos_for_each_filter_safe(dst_filter, next, dst) { + src_filter = damos_nth_filter(i++, src); + if (src_filter) + damos_commit_filter(dst_filter, src_filter); + else + damos_destroy_filter(dst_filter); + } + + damos_for_each_filter_safe(src_filter, next, src) { + if (j++ < i) + continue; + + new_filter = damos_new_filter( + src_filter->type, src_filter->matching, + src_filter->allow); + if (!new_filter) + return -ENOMEM; + damos_commit_filter_arg(new_filter, src_filter); + damos_add_filter(dst, new_filter); + } + return 0; +} + +static int damos_commit_ops_filters(struct damos *dst, struct damos *src) +{ + struct damos_filter *dst_filter, *next, *src_filter, *new_filter; + int i = 0, j = 0; + + damos_for_each_ops_filter_safe(dst_filter, next, dst) { + src_filter = damos_nth_filter(i++, src); + if (src_filter) + damos_commit_filter(dst_filter, src_filter); + else + damos_destroy_filter(dst_filter); + } + + damos_for_each_ops_filter_safe(src_filter, next, src) { + if (j++ < i) + continue; + + new_filter = damos_new_filter( + src_filter->type, src_filter->matching, + src_filter->allow); + if (!new_filter) + return -ENOMEM; + damos_commit_filter_arg(new_filter, 
src_filter); + damos_add_filter(dst, new_filter); + } + return 0; +} + +/** + * damos_filters_default_reject() - decide whether to reject memory that didn't + * match with any given filter. + * @filters: Given DAMOS filters of a group. + */ +static bool damos_filters_default_reject(struct list_head *filters) +{ + struct damos_filter *last_filter; + + if (list_empty(filters)) + return false; + last_filter = list_last_entry(filters, struct damos_filter, list); + return last_filter->allow; +} + +static void damos_set_filters_default_reject(struct damos *s) +{ + if (!list_empty(&s->ops_filters)) + s->core_filters_default_reject = false; + else + s->core_filters_default_reject = + damos_filters_default_reject(&s->filters); + s->ops_filters_default_reject = + damos_filters_default_reject(&s->ops_filters); +} + +static int damos_commit_filters(struct damos *dst, struct damos *src) +{ + int err; + + err = damos_commit_core_filters(dst, src); + if (err) + return err; + err = damos_commit_ops_filters(dst, src); + if (err) + return err; + damos_set_filters_default_reject(dst); + return 0; +} + +static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) +{ + struct damos *s; + int i = 0; + + damon_for_each_scheme(s, ctx) { + if (i++ == n) + return s; + } + return NULL; +} + +static int damos_commit(struct damos *dst, struct damos *src) +{ + int err; + + dst->pattern = src->pattern; + dst->action = src->action; + dst->apply_interval_us = src->apply_interval_us; + + err = damos_commit_quota(&dst->quota, &src->quota); + if (err) + return err; + + dst->wmarks = src->wmarks; + + err = damos_commit_filters(dst, src); + return err; +} + +static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src) +{ + struct damos *dst_scheme, *next, *src_scheme, *new_scheme; + int i = 0, j = 0, err; + + damon_for_each_scheme_safe(dst_scheme, next, dst) { + src_scheme = damon_nth_scheme(i++, src); + if (src_scheme) { + err = damos_commit(dst_scheme, src_scheme); + if (err) + return err; + } else { + damon_destroy_scheme(dst_scheme); + } + } + + damon_for_each_scheme_safe(src_scheme, next, src) { + if (j++ < i) + continue; + new_scheme = damon_new_scheme(&src_scheme->pattern, + src_scheme->action, + src_scheme->apply_interval_us, + &src_scheme->quota, &src_scheme->wmarks, + NUMA_NO_NODE); + if (!new_scheme) + return -ENOMEM; + err = damos_commit(new_scheme, src_scheme); + if (err) { + damon_destroy_scheme(new_scheme); + return err; + } + damon_add_scheme(dst, new_scheme); + } + return 0; +} + +static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx) +{ + struct damon_target *t; + int i = 0; + + damon_for_each_target(t, ctx) { + if (i++ == n) + return t; + } + return NULL; +} + +/* + * The caller should ensure the regions of @src are + * 1. valid (end >= src) and + * 2. sorted by starting address. + * + * If @src has no region, @dst keeps current regions. 
+ */ +static int damon_commit_target_regions( + struct damon_target *dst, struct damon_target *src) +{ + struct damon_region *src_region; + struct damon_addr_range *ranges; + int i = 0, err; + + damon_for_each_region(src_region, src) + i++; + if (!i) + return 0; + + ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); + if (!ranges) + return -ENOMEM; + i = 0; + damon_for_each_region(src_region, src) + ranges[i++] = src_region->ar; + err = damon_set_regions(dst, ranges, i); + kfree(ranges); + return err; +} + +static int damon_commit_target( + struct damon_target *dst, bool dst_has_pid, + struct damon_target *src, bool src_has_pid) +{ + int err; + + err = damon_commit_target_regions(dst, src); + if (err) + return err; + if (dst_has_pid) + put_pid(dst->pid); + if (src_has_pid) + get_pid(src->pid); + dst->pid = src->pid; + return 0; +} + +static int damon_commit_targets( + struct damon_ctx *dst, struct damon_ctx *src) +{ + struct damon_target *dst_target, *next, *src_target, *new_target; + int i = 0, j = 0, err; + + damon_for_each_target_safe(dst_target, next, dst) { + src_target = damon_nth_target(i++, src); + if (src_target) { + err = damon_commit_target( + dst_target, damon_target_has_pid(dst), + src_target, damon_target_has_pid(src)); + if (err) + return err; + } else { + struct damos *s; + + if (damon_target_has_pid(dst)) + put_pid(dst_target->pid); + damon_destroy_target(dst_target); + damon_for_each_scheme(s, dst) { + if (s->quota.charge_target_from == dst_target) { + s->quota.charge_target_from = NULL; + s->quota.charge_addr_from = 0; + } + } + } + } + + damon_for_each_target_safe(src_target, next, src) { + if (j++ < i) + continue; + new_target = damon_new_target(); + if (!new_target) + return -ENOMEM; + err = damon_commit_target(new_target, false, + src_target, damon_target_has_pid(src)); + if (err) { + damon_destroy_target(new_target); + return err; + } + damon_add_target(dst, new_target); + } + return 0; +} + +/** + * damon_commit_ctx() - Commit parameters of a DAMON context to another. + * @dst: The commit destination DAMON context. + * @src: The commit source DAMON context. + * + * This function copies user-specified parameters from @src to @dst and update + * the internal status and results accordingly. Users should use this function + * for context-level parameters update of running context, instead of manual + * in-place updates. + * + * This function should be called from parameters-update safe context, like + * DAMON callbacks. + */ +int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) +{ + int err; + + err = damon_commit_schemes(dst, src); + if (err) + return err; + err = damon_commit_targets(dst, src); + if (err) + return err; + /* + * schemes and targets should be updated first, since + * 1. damon_set_attrs() updates monitoring results of targets and + * next_apply_sis of schemes, and + * 2. ops update should be done after pid handling is done (target + * committing require putting pids). + */ + err = damon_set_attrs(dst, &src->attrs); + if (err) + return err; + dst->ops = src->ops; + + return 0; +} + /** * damon_nr_running_ctxs() - Return number of currently running contexts. 
*/ @@ -811,6 +1326,107 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) return err; } +static bool damon_is_running(struct damon_ctx *ctx) +{ + bool running; + + mutex_lock(&ctx->kdamond_lock); + running = ctx->kdamond != NULL; + mutex_unlock(&ctx->kdamond_lock); + return running; +} + +/** + * damon_call() - Invoke a given function on DAMON worker thread (kdamond). + * @ctx: DAMON context to call the function for. + * @control: Control variable of the call request. + * + * Ask DAMON worker thread (kdamond) of @ctx to call a function with an + * argument data that respectively passed via &damon_call_control->fn and + * &damon_call_control->data of @control, and wait until the kdamond finishes + * handling of the request. + * + * The kdamond executes the function with the argument in the main loop, just + * after a sampling of the iteration is finished. The function can hence + * safely access the internal data of the &struct damon_ctx without additional + * synchronization. The return value of the function will be saved in + * &damon_call_control->return_code. + * + * Return: 0 on success, negative error code otherwise. + */ +int damon_call(struct damon_ctx *ctx, struct damon_call_control *control) +{ + init_completion(&control->completion); + control->canceled = false; + + mutex_lock(&ctx->call_control_lock); + if (ctx->call_control) { + mutex_unlock(&ctx->call_control_lock); + return -EBUSY; + } + ctx->call_control = control; + mutex_unlock(&ctx->call_control_lock); + if (!damon_is_running(ctx)) + return -EINVAL; + wait_for_completion(&control->completion); + if (control->canceled) + return -ECANCELED; + return 0; +} + +/** + * damos_walk() - Invoke a given functions while DAMOS walk regions. + * @ctx: DAMON context to call the functions for. + * @control: Control variable of the walk request. + * + * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region + * that the kdamond will apply DAMOS action to, and wait until the kdamond + * finishes handling of the request. + * + * The kdamond executes the given function in the main loop, for each region + * just after it applied any DAMOS actions of @ctx to it. The invocation is + * made only within one &damos->apply_interval_us since damos_walk() + * invocation, for each scheme. The given callback function can hence safely + * access the internal data of &struct damon_ctx and &struct damon_region that + * each of the scheme will apply the action for next interval, without + * additional synchronizations against the kdamond. If every scheme of @ctx + * passed at least one &damos->apply_interval_us, kdamond marks the request as + * completed so that damos_walk() can wakeup and return. + * + * Return: 0 on success, negative error code otherwise. + */ +int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control) +{ + init_completion(&control->completion); + control->canceled = false; + mutex_lock(&ctx->walk_control_lock); + if (ctx->walk_control) { + mutex_unlock(&ctx->walk_control_lock); + return -EBUSY; + } + ctx->walk_control = control; + mutex_unlock(&ctx->walk_control_lock); + if (!damon_is_running(ctx)) + return -EINVAL; + wait_for_completion(&control->completion); + if (control->canceled) + return -ECANCELED; + return 0; +} + +/* + * Warn and fix corrupted ->nr_accesses[_bp] for investigations and preventing + * the problem being propagated. 
+ */ +static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r) +{ + if (r->nr_accesses_bp == r->nr_accesses * 10000) + return; + WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n", + r->nr_accesses_bp, r->nr_accesses); + r->nr_accesses_bp = r->nr_accesses * 10000; +} + /* * Reset the aggregated monitoring results ('nr_accesses' of each region). */ @@ -824,6 +1440,7 @@ static void kdamond_reset_aggregated(struct damon_ctx *c) damon_for_each_region(r, t) { trace_damon_aggregated(ti, r, damon_nr_regions(t)); + damon_warn_fix_nr_accesses_corruption(r); r->last_nr_accesses = r->nr_accesses; r->nr_accesses = 0; } @@ -831,6 +1448,66 @@ static void kdamond_reset_aggregated(struct damon_ctx *c) } } +static unsigned long damon_get_intervals_score(struct damon_ctx *c) +{ + struct damon_target *t; + struct damon_region *r; + unsigned long sz_region, max_access_events = 0, access_events = 0; + unsigned long target_access_events; + unsigned long goal_bp = c->attrs.intervals_goal.access_bp; + + damon_for_each_target(t, c) { + damon_for_each_region(r, t) { + sz_region = damon_sz_region(r); + max_access_events += sz_region * c->attrs.aggr_samples; + access_events += sz_region * r->nr_accesses; + } + } + target_access_events = max_access_events * goal_bp / 10000; + target_access_events = target_access_events ? : 1; + return access_events * 10000 / target_access_events; +} + +static unsigned long damon_feed_loop_next_input(unsigned long last_input, + unsigned long score); + +static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) +{ + unsigned long score_bp, adaptation_bp; + + score_bp = damon_get_intervals_score(c); + adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / + 10000; + /* + * adaptaion_bp ranges from 1 to 20,000. Avoid too rapid reduction of + * the intervals by rescaling [1,10,000] to [5000, 10,000]. 
+ */ + if (adaptation_bp <= 10000) + adaptation_bp = 5000 + adaptation_bp / 2; + return adaptation_bp; +} + +static void kdamond_tune_intervals(struct damon_ctx *c) +{ + unsigned long adaptation_bp; + struct damon_attrs new_attrs; + struct damon_intervals_goal *goal; + + adaptation_bp = damon_get_intervals_adaptation_bp(c); + if (adaptation_bp == 10000) + return; + + new_attrs = c->attrs; + goal = &c->attrs.intervals_goal; + new_attrs.sample_interval = min(goal->max_sample_us, + c->attrs.sample_interval * adaptation_bp / 10000); + new_attrs.sample_interval = max(goal->min_sample_us, + new_attrs.sample_interval); + new_attrs.aggr_interval = new_attrs.sample_interval * + c->attrs.aggr_samples; + damon_set_attrs(c, &new_attrs); +} + static void damon_split_region_at(struct damon_target *t, struct damon_region *r, unsigned long sz_r); @@ -921,16 +1598,18 @@ static bool damos_skip_charged_region(struct damon_target *t, } static void damos_update_stat(struct damos *s, - unsigned long sz_tried, unsigned long sz_applied) + unsigned long sz_tried, unsigned long sz_applied, + unsigned long sz_ops_filter_passed) { s->stat.nr_tried++; s->stat.sz_tried += sz_tried; if (sz_applied) s->stat.nr_applied++; s->stat.sz_applied += sz_applied; + s->stat.sz_ops_filter_passed += sz_ops_filter_passed; } -static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, +static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos_filter *filter) { bool matched = false; @@ -983,11 +1662,102 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, { struct damos_filter *filter; + s->core_filters_allowed = false; damos_for_each_filter(filter, s) { - if (__damos_filter_out(ctx, t, r, filter)) - return true; + if (damos_filter_match(ctx, t, r, filter)) { + if (filter->allow) + s->core_filters_allowed = true; + return !filter->allow; + } } - return false; + return s->core_filters_default_reject; +} + +/* + * damos_walk_call_walk() - Call &damos_walk_control->walk_fn. + * @ctx: The context of &damon_ctx->walk_control. + * @t: The monitoring target of @r that @s will be applied. + * @r: The region of @t that @s will be applied. + * @s: The scheme of @ctx that will be applied to @r. + * + * This function is called from kdamond whenever it asked the operation set to + * apply a DAMOS scheme action to a region. If a DAMOS walk request is + * installed by damos_walk() and not yet uninstalled, invoke it. + */ +static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, + struct damon_region *r, struct damos *s, + unsigned long sz_filter_passed) +{ + struct damos_walk_control *control; + + if (s->walk_completed) + return; + + control = ctx->walk_control; + if (!control) + return; + + control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); +} + +/* + * damos_walk_complete() - Complete DAMOS walk request if all walks are done. + * @ctx: The context of &damon_ctx->walk_control. + * @s: A scheme of @ctx that all walks are now done. + * + * This function is called when kdamond finished applying the action of a DAMOS + * scheme to all regions that eligible for the given &damos->apply_interval_us. + * If every scheme of @ctx including @s now finished walking for at least one + * &damos->apply_interval_us, this function makrs the handling of the given + * DAMOS walk request is done, so that damos_walk() can wake up and return. 
+ */ +static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) +{ + struct damos *siter; + struct damos_walk_control *control; + + control = ctx->walk_control; + if (!control) + return; + + s->walk_completed = true; + /* if all schemes completed, signal completion to walker */ + damon_for_each_scheme(siter, ctx) { + if (!siter->walk_completed) + return; + } + damon_for_each_scheme(siter, ctx) + siter->walk_completed = false; + + complete(&control->completion); + ctx->walk_control = NULL; +} + +/* + * damos_walk_cancel() - Cancel the current DAMOS walk request. + * @ctx: The context of &damon_ctx->walk_control. + * + * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS + * walk is requested but there is no DAMOS scheme to walk for, or the kdamond + * is already out of the main loop and therefore gonna be terminated, and hence + * cannot continue the walks. This function therefore marks the walk request + * as canceled, so that damos_walk() can wake up and return. + */ +static void damos_walk_cancel(struct damon_ctx *ctx) +{ + struct damos_walk_control *control; + + mutex_lock(&ctx->walk_control_lock); + control = ctx->walk_control; + mutex_unlock(&ctx->walk_control_lock); + + if (!control) + return; + control->canceled = true; + complete(&control->completion); + mutex_lock(&ctx->walk_control_lock); + ctx->walk_control = NULL; + mutex_unlock(&ctx->walk_control_lock); } static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, @@ -997,7 +1767,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, unsigned long sz = damon_sz_region(r); struct timespec64 begin, end; unsigned long sz_applied = 0; - int err = 0; + unsigned long sz_ops_filter_passed = 0; /* * We plan to support multiple context per kdamond, as DAMON sysfs * implies with 'nr_contexts' file. 
Nevertheless, only single context @@ -1037,13 +1807,11 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, if (damos_filter_out(c, t, r, s)) return; ktime_get_coarse_ts64(&begin); - if (c->callback.before_damos_apply) - err = c->callback.before_damos_apply(c, t, r, s); - if (!err) { - trace_damos_before_apply(cidx, sidx, tidx, r, - damon_nr_regions(t), do_trace); - sz_applied = c->ops.apply_scheme(c, t, r, s); - } + trace_damos_before_apply(cidx, sidx, tidx, r, + damon_nr_regions(t), do_trace); + sz_applied = c->ops.apply_scheme(c, t, r, s, + &sz_ops_filter_passed); + damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); ktime_get_coarse_ts64(&end); quota->total_charged_ns += timespec64_to_ns(&end) - timespec64_to_ns(&begin); @@ -1057,7 +1825,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, r->age = 0; update_stat: - damos_update_stat(s, sz, sz_applied); + damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed); } static void damon_do_apply_schemes(struct damon_ctx *c, @@ -1069,7 +1837,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c, damon_for_each_scheme(s, c) { struct damos_quota *quota = &s->quota; - if (c->passed_sample_intervals != s->next_apply_sis) + if (c->passed_sample_intervals < s->next_apply_sis) continue; if (!s->wmarks.activated) @@ -1113,17 +1881,31 @@ static unsigned long damon_feed_loop_next_input(unsigned long last_input, unsigned long score) { const unsigned long goal = 10000; - unsigned long score_goal_diff = max(goal, score) - min(goal, score); - unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal; - unsigned long compensation = last_input * score_goal_diff_bp / 10000; /* Set minimum input as 10000 to avoid compensation be zero */ const unsigned long min_input = 10000; + unsigned long score_goal_diff, compensation; + bool over_achieving = score > goal; + + if (score == goal) + return last_input; + if (score >= goal * 2) + return min_input; - if (goal > score) + if (over_achieving) + score_goal_diff = score - goal; + else + score_goal_diff = goal - score; + + if (last_input < ULONG_MAX / score_goal_diff) + compensation = last_input * score_goal_diff / goal; + else + compensation = last_input / goal * score_goal_diff; + + if (over_achieving) + return max(last_input - compensation, min_input); + if (last_input < ULONG_MAX - compensation) return last_input + compensation; - if (last_input > compensation + min_input) - return last_input - compensation; - return min_input; + return ULONG_MAX; } #ifdef CONFIG_PSI @@ -1145,6 +1927,29 @@ static inline u64 damos_get_some_mem_psi_total(void) #endif /* CONFIG_PSI */ +#ifdef CONFIG_NUMA +static __kernel_ulong_t damos_get_node_mem_bp( + struct damos_quota_goal *goal) +{ + struct sysinfo i; + __kernel_ulong_t numerator; + + si_meminfo_node(&i, goal->nid); + if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP) + numerator = i.totalram - i.freeram; + else /* DAMOS_QUOTA_NODE_MEM_FREE_BP */ + numerator = i.freeram; + return numerator * 10000 / i.totalram; +} +#else +static __kernel_ulong_t damos_get_node_mem_bp( + struct damos_quota_goal *goal) +{ + return 0; +} +#endif + + static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal) { u64 now_psi_total; @@ -1158,6 +1963,10 @@ static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal) goal->current_value = now_psi_total - goal->last_psi_total; goal->last_psi_total = now_psi_total; break; + case DAMOS_QUOTA_NODE_MEM_USED_BP: + case DAMOS_QUOTA_NODE_MEM_FREE_BP: + 
goal->current_value = damos_get_node_mem_bp(goal); + break; default: break; } @@ -1185,7 +1994,7 @@ static unsigned long damos_quota_score(struct damos_quota *quota) static void damos_set_effective_quota(struct damos_quota *quota) { unsigned long throughput; - unsigned long esz; + unsigned long esz = ULONG_MAX; if (!quota->ms && list_empty("a->goals)) { quota->esz = quota->sz; @@ -1207,10 +2016,7 @@ static void damos_set_effective_quota(struct damos_quota *quota) quota->total_charged_ns; else throughput = PAGE_SIZE * 1024; - if (!list_empty("a->goals)) - esz = min(throughput * quota->ms, esz); - else - esz = throughput * quota->ms; + esz = min(throughput * quota->ms, esz); } if (quota->sz && quota->sz < esz) @@ -1245,13 +2051,16 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) return; /* Fill up the score histogram */ - memset(quota->histogram, 0, sizeof(quota->histogram)); + memset(c->regions_score_histogram, 0, + sizeof(*c->regions_score_histogram) * + (DAMOS_MAX_SCORE + 1)); damon_for_each_target(t, c) { damon_for_each_region(r, t) { if (!__damos_valid_target(r, s)) continue; score = c->ops.get_scheme_score(c, t, r, s); - quota->histogram[score] += damon_sz_region(r); + c->regions_score_histogram[score] += + damon_sz_region(r); if (score > max_score) max_score = score; } @@ -1259,7 +2068,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) /* Set the min score limit */ for (cumulated_sz = 0, score = max_score; ; score--) { - cumulated_sz += quota->histogram[score]; + cumulated_sz += c->regions_score_histogram[score]; if (cumulated_sz >= quota->esz || !score) break; } @@ -1276,7 +2085,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) bool has_schemes_to_apply = false; damon_for_each_scheme(s, c) { - if (c->passed_sample_intervals != s->next_apply_sis) + if (c->passed_sample_intervals < s->next_apply_sis) continue; if (!s->wmarks.activated) @@ -1290,18 +2099,22 @@ static void kdamond_apply_schemes(struct damon_ctx *c) if (!has_schemes_to_apply) return; + mutex_lock(&c->walk_control_lock); damon_for_each_target(t, c) { damon_for_each_region_safe(r, next_r, t) damon_do_apply_schemes(c, t, r); } damon_for_each_scheme(s, c) { - if (c->passed_sample_intervals != s->next_apply_sis) + if (c->passed_sample_intervals < s->next_apply_sis) continue; - s->next_apply_sis += + damos_walk_complete(c, s); + s->next_apply_sis = c->passed_sample_intervals + (s->apply_interval_us ? s->apply_interval_us : c->attrs.aggr_interval) / sample_interval; + s->last_applied = NULL; } + mutex_unlock(&c->walk_control_lock); } /* @@ -1357,14 +2170,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres, * access frequencies are similar. This is for minimizing the monitoring * overhead under the dynamically changeable access pattern. If a merge was * unnecessarily made, later 'kdamond_split_regions()' will revert it. + * + * The total number of regions could be higher than the user-defined limit, + * max_nr_regions for some cases. For example, the user can update + * max_nr_regions to a number that lower than the current number of regions + * while DAMON is running. For such a case, repeat merging until the limit is + * met while increasing @threshold up to possible maximum level. 
*/ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, unsigned long sz_limit) { struct damon_target *t; - - damon_for_each_target(t, c) - damon_merge_regions_of(t, threshold, sz_limit); + unsigned int nr_regions; + unsigned int max_thres; + + max_thres = c->attrs.aggr_interval / + (c->attrs.sample_interval ? c->attrs.sample_interval : 1); + do { + nr_regions = 0; + damon_for_each_target(t, c) { + damon_merge_regions_of(t, threshold, sz_limit); + nr_regions += damon_nr_regions(t); + } + threshold = max(1, threshold * 2); + } while (nr_regions > c->attrs.max_nr_regions && + threshold / 2 < max_thres); } /* @@ -1480,12 +2310,14 @@ static bool kdamond_need_stop(struct damon_ctx *ctx) return true; } -static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric) +static int damos_get_wmark_metric_value(enum damos_wmark_metric metric, + unsigned long *metric_value) { switch (metric) { case DAMOS_WMARK_FREE_MEM_RATE: - return global_zone_page_state(NR_FREE_PAGES) * 1000 / + *metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 / totalram_pages(); + return 0; default: break; } @@ -1500,17 +2332,15 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme) { unsigned long metric; - if (scheme->wmarks.metric == DAMOS_WMARK_NONE) + if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric)) return 0; - metric = damos_wmark_metric_value(scheme->wmarks.metric); /* higher than high watermark or lower than low watermark */ if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) { if (scheme->wmarks.activated) pr_debug("deactivate a scheme (%d) for %s wmark\n", - scheme->action, - metric > scheme->wmarks.high ? - "high" : "low"); + scheme->action, + str_high_low(metric > scheme->wmarks.high)); scheme->wmarks.activated = false; return scheme->wmarks.interval; } @@ -1528,11 +2358,42 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme) static void kdamond_usleep(unsigned long usecs) { - /* See Documentation/timers/timers-howto.rst for the thresholds */ - if (usecs > 20 * USEC_PER_MSEC) + if (usecs >= USLEEP_RANGE_UPPER_BOUND) schedule_timeout_idle(usecs_to_jiffies(usecs)); else - usleep_idle_range(usecs, usecs + 1); + usleep_range_idle(usecs, usecs + 1); +} + +/* + * kdamond_call() - handle damon_call_control. + * @ctx: The &struct damon_ctx of the kdamond. + * @cancel: Whether to cancel the invocation of the function. + * + * If there is a &struct damon_call_control request that registered via + * &damon_call() on @ctx, do or cancel the invocation of the function depending + * on @cancel. @cancel is set when the kdamond is already out of the main loop + * and therefore will be terminated. 
+ */ +static void kdamond_call(struct damon_ctx *ctx, bool cancel) +{ + struct damon_call_control *control; + int ret = 0; + + mutex_lock(&ctx->call_control_lock); + control = ctx->call_control; + mutex_unlock(&ctx->call_control_lock); + if (!control) + return; + if (cancel) { + control->canceled = true; + } else { + ret = control->fn(control->data); + control->return_code = ret; + } + complete(&control->completion); + mutex_lock(&ctx->call_control_lock); + ctx->call_control = NULL; + mutex_unlock(&ctx->call_control_lock); } /* Returns negative error code if it's not activated but should return */ @@ -1559,11 +2420,13 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) if (ctx->callback.after_wmarks_check && ctx->callback.after_wmarks_check(ctx)) break; + kdamond_call(ctx, false); + damos_walk_cancel(ctx); } return -EBUSY; } -static void kdamond_init_intervals_sis(struct damon_ctx *ctx) +static void kdamond_init_ctx(struct damon_ctx *ctx) { unsigned long sample_interval = ctx->attrs.sample_interval ? ctx->attrs.sample_interval : 1; @@ -1574,11 +2437,14 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx) ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / sample_interval; + ctx->next_intervals_tune_sis = ctx->next_aggregation_sis * + ctx->attrs.intervals_goal.aggrs; damon_for_each_scheme(scheme, ctx) { apply_interval = scheme->apply_interval_us ? scheme->apply_interval_us : ctx->attrs.aggr_interval; scheme->next_apply_sis = apply_interval / sample_interval; + damos_set_filters_default_reject(scheme); } } @@ -1596,11 +2462,13 @@ static int kdamond_fn(void *data) pr_debug("kdamond (%d) starts\n", current->pid); complete(&ctx->kdamond_started); - kdamond_init_intervals_sis(ctx); + kdamond_init_ctx(ctx); if (ctx->ops.init) ctx->ops.init(ctx); - if (ctx->callback.before_start && ctx->callback.before_start(ctx)) + ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1, + sizeof(*ctx->regions_score_histogram), GFP_KERNEL); + if (!ctx->regions_score_histogram) goto done; sz_limit = damon_region_sz_limit(ctx); @@ -1622,9 +2490,6 @@ static int kdamond_fn(void *data) if (ctx->ops.prepare_access_checks) ctx->ops.prepare_access_checks(ctx); - if (ctx->callback.after_sampling && - ctx->callback.after_sampling(ctx)) - break; kdamond_usleep(sample_interval); ctx->passed_sample_intervals++; @@ -1632,7 +2497,7 @@ static int kdamond_fn(void *data) if (ctx->ops.check_accesses) max_nr_accesses = ctx->ops.check_accesses(ctx); - if (ctx->passed_sample_intervals == next_aggregation_sis) { + if (ctx->passed_sample_intervals >= next_aggregation_sis) { kdamond_merge_regions(ctx, max_nr_accesses / 10, sz_limit); @@ -1642,25 +2507,55 @@ static int kdamond_fn(void *data) } /* - * do kdamond_apply_schemes() after kdamond_merge_regions() if - * possible, to reduce overhead + * do kdamond_call() and kdamond_apply_schemes() after + * kdamond_merge_regions() if possible, to reduce overhead */ + kdamond_call(ctx, false); if (!list_empty(&ctx->schemes)) kdamond_apply_schemes(ctx); + else + damos_walk_cancel(ctx); sample_interval = ctx->attrs.sample_interval ? ctx->attrs.sample_interval : 1; - if (ctx->passed_sample_intervals == next_aggregation_sis) { + if (ctx->passed_sample_intervals >= next_aggregation_sis) { + if (ctx->attrs.intervals_goal.aggrs && + ctx->passed_sample_intervals >= + ctx->next_intervals_tune_sis) { + /* + * ctx->next_aggregation_sis might be updated + * from kdamond_call(). 
In the case, + * damon_set_attrs() which will be called from + * kdamond_tune_interval() may wrongly think + * this is in the middle of the current + * aggregation, and make aggregation + * information reset for all regions. Then, + * following kdamond_reset_aggregated() call + * will make the region information invalid, + * particularly for ->nr_accesses_bp. + * + * Reset ->next_aggregation_sis to avoid that. + * It will anyway correctly updated after this + * if caluse. + */ + ctx->next_aggregation_sis = + next_aggregation_sis; + ctx->next_intervals_tune_sis += + ctx->attrs.aggr_samples * + ctx->attrs.intervals_goal.aggrs; + kdamond_tune_intervals(ctx); + sample_interval = ctx->attrs.sample_interval ? + ctx->attrs.sample_interval : 1; + + } ctx->next_aggregation_sis = next_aggregation_sis + ctx->attrs.aggr_interval / sample_interval; kdamond_reset_aggregated(ctx); kdamond_split_regions(ctx); - if (ctx->ops.reset_aggregated) - ctx->ops.reset_aggregated(ctx); } - if (ctx->passed_sample_intervals == next_ops_update_sis) { + if (ctx->passed_sample_intervals >= next_ops_update_sis) { ctx->next_ops_update_sis = next_ops_update_sis + ctx->attrs.ops_update_interval / sample_interval; @@ -1679,12 +2574,16 @@ done: ctx->callback.before_terminate(ctx); if (ctx->ops.cleanup) ctx->ops.cleanup(ctx); + kfree(ctx->regions_score_histogram); pr_debug("kdamond (%d) finishes\n", current->pid); mutex_lock(&ctx->kdamond_lock); ctx->kdamond = NULL; mutex_unlock(&ctx->kdamond_lock); + kdamond_call(ctx, true); + damos_walk_cancel(ctx); + mutex_lock(&damon_lock); nr_running_ctxs--; if (!nr_running_ctxs && running_exclusive_ctxs) @@ -1850,4 +2749,4 @@ static int __init damon_init(void) subsys_initcall(damon_init); -#include "core-test.h" +#include "tests/core-kunit.h" diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h deleted file mode 100644 index 2d85217f5ba4..000000000000 --- a/mm/damon/dbgfs-test.h +++ /dev/null @@ -1,163 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * DAMON Debugfs Interface Unit Tests - * - * Author: SeongJae Park <sj@kernel.org> - */ - -#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST - -#ifndef _DAMON_DBGFS_TEST_H -#define _DAMON_DBGFS_TEST_H - -#include <kunit/test.h> - -static void damon_dbgfs_test_str_to_ints(struct kunit *test) -{ - char *question; - int *answers; - int expected[] = {12, 35, 46}; - ssize_t nr_integers = 0, i; - - question = "123"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); - KUNIT_EXPECT_EQ(test, 123, answers[0]); - kfree(answers); - - question = "123abc"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); - KUNIT_EXPECT_EQ(test, 123, answers[0]); - kfree(answers); - - question = "a123"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); - kfree(answers); - - question = "12 35"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); - for (i = 0; i < nr_integers; i++) - KUNIT_EXPECT_EQ(test, expected[i], answers[i]); - kfree(answers); - - question = "12 35 46"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers); - for (i = 0; i < nr_integers; i++) - KUNIT_EXPECT_EQ(test, expected[i], answers[i]); - kfree(answers); - - question = "12 35 abc 46"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, 
(ssize_t)2, nr_integers); - for (i = 0; i < 2; i++) - KUNIT_EXPECT_EQ(test, expected[i], answers[i]); - kfree(answers); - - question = ""; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); - kfree(answers); - - question = "\n"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); - kfree(answers); -} - -static void damon_dbgfs_test_set_targets(struct kunit *test) -{ - struct damon_ctx *ctx = dbgfs_new_ctx(); - char buf[64]; - - /* Make DAMON consider target has no pid */ - damon_select_ops(ctx, DAMON_OPS_PADDR); - - dbgfs_set_targets(ctx, 0, NULL); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); - - dbgfs_set_targets(ctx, 1, NULL); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "42\n"); - - dbgfs_set_targets(ctx, 0, NULL); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); - - dbgfs_destroy_ctx(ctx); -} - -static void damon_dbgfs_test_set_init_regions(struct kunit *test) -{ - struct damon_ctx *ctx = damon_new_ctx(); - /* Each line represents one region in ``<target idx> <start> <end>`` */ - char * const valid_inputs[] = {"1 10 20\n 1 20 30\n1 35 45", - "1 10 20\n", - "1 10 20\n0 39 59\n0 70 134\n 1 20 25\n", - ""}; - /* Reading the file again will show sorted, clean output */ - char * const valid_expects[] = {"1 10 20\n1 20 30\n1 35 45\n", - "1 10 20\n", - "0 39 59\n0 70 134\n1 10 20\n1 20 25\n", - ""}; - char * const invalid_inputs[] = {"3 10 20\n", /* target not exists */ - "1 10 20\n 1 14 26\n", /* regions overlap */ - "0 10 20\n1 30 40\n 0 5 8"}; /* not sorted by address */ - char *input, *expect; - int i, rc; - char buf[256]; - - damon_select_ops(ctx, DAMON_OPS_PADDR); - - dbgfs_set_targets(ctx, 3, NULL); - - /* Put valid inputs and check the results */ - for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) { - input = valid_inputs[i]; - expect = valid_expects[i]; - - rc = set_init_regions(ctx, input, strnlen(input, 256)); - KUNIT_EXPECT_EQ(test, rc, 0); - - memset(buf, 0, 256); - sprint_init_regions(ctx, buf, 256); - - KUNIT_EXPECT_STREQ(test, (char *)buf, expect); - } - /* Put invalid inputs and check the return error code */ - for (i = 0; i < ARRAY_SIZE(invalid_inputs); i++) { - input = invalid_inputs[i]; - pr_info("input: %s\n", input); - rc = set_init_regions(ctx, input, strnlen(input, 256)); - KUNIT_EXPECT_EQ(test, rc, -EINVAL); - - memset(buf, 0, 256); - sprint_init_regions(ctx, buf, 256); - - KUNIT_EXPECT_STREQ(test, (char *)buf, ""); - } - - dbgfs_set_targets(ctx, 0, NULL); - damon_destroy_ctx(ctx); -} - -static struct kunit_case damon_test_cases[] = { - KUNIT_CASE(damon_dbgfs_test_str_to_ints), - KUNIT_CASE(damon_dbgfs_test_set_targets), - KUNIT_CASE(damon_dbgfs_test_set_init_regions), - {}, -}; - -static struct kunit_suite damon_test_suite = { - .name = "damon-dbgfs", - .test_cases = damon_test_cases, -}; -kunit_test_suite(damon_test_suite); - -#endif /* _DAMON_TEST_H */ - -#endif /* CONFIG_DAMON_KUNIT_TEST */ diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c deleted file mode 100644 index 2461cfe2e968..000000000000 --- a/mm/damon/dbgfs.c +++ /dev/null @@ -1,1148 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DAMON Debugfs Interface - * - * Author: SeongJae Park <sj@kernel.org> - */ - -#define pr_fmt(fmt) "damon-dbgfs: " fmt - -#include <linux/damon.h> -#include <linux/debugfs.h> -#include <linux/file.h> -#include <linux/mm.h> -#include <linux/module.h> 
-#include <linux/page_idle.h> -#include <linux/slab.h> - -#define DAMON_DBGFS_DEPRECATION_NOTICE \ - "DAMON debugfs interface is deprecated, so users should move " \ - "to DAMON_SYSFS. If you cannot, please report your usecase to " \ - "damon@lists.linux.dev and linux-mm@kvack.org.\n" - -static struct damon_ctx **dbgfs_ctxs; -static int dbgfs_nr_ctxs; -static struct dentry **dbgfs_dirs; -static DEFINE_MUTEX(damon_dbgfs_lock); - -static void damon_dbgfs_warn_deprecation(void) -{ - pr_warn_once(DAMON_DBGFS_DEPRECATION_NOTICE); -} - -/* - * Returns non-empty string on success, negative error code otherwise. - */ -static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos) -{ - char *kbuf; - ssize_t ret; - - /* We do not accept continuous write */ - if (*ppos) - return ERR_PTR(-EINVAL); - - kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return ERR_PTR(-ENOMEM); - - ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count); - if (ret != count) { - kfree(kbuf); - return ERR_PTR(-EIO); - } - kbuf[ret] = '\0'; - - return kbuf; -} - -static ssize_t dbgfs_attrs_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char kbuf[128]; - int ret; - - mutex_lock(&ctx->kdamond_lock); - ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n", - ctx->attrs.sample_interval, ctx->attrs.aggr_interval, - ctx->attrs.ops_update_interval, - ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions); - mutex_unlock(&ctx->kdamond_lock); - - return simple_read_from_buffer(buf, count, ppos, kbuf, ret); -} - -static ssize_t dbgfs_attrs_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - struct damon_attrs attrs; - char *kbuf; - ssize_t ret; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - if (sscanf(kbuf, "%lu %lu %lu %lu %lu", - &attrs.sample_interval, &attrs.aggr_interval, - &attrs.ops_update_interval, - &attrs.min_nr_regions, - &attrs.max_nr_regions) != 5) { - ret = -EINVAL; - goto out; - } - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - ret = -EBUSY; - goto unlock_out; - } - - ret = damon_set_attrs(ctx, &attrs); - if (!ret) - ret = count; -unlock_out: - mutex_unlock(&ctx->kdamond_lock); -out: - kfree(kbuf); - return ret; -} - -/* - * Return corresponding dbgfs' scheme action value (int) for the given - * damos_action if the given damos_action value is valid and supported by - * dbgfs, negative error code otherwise. 
- */ -static int damos_action_to_dbgfs_scheme_action(enum damos_action action) -{ - switch (action) { - case DAMOS_WILLNEED: - return 0; - case DAMOS_COLD: - return 1; - case DAMOS_PAGEOUT: - return 2; - case DAMOS_HUGEPAGE: - return 3; - case DAMOS_NOHUGEPAGE: - return 4; - case DAMOS_STAT: - return 5; - default: - return -EINVAL; - } -} - -static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len) -{ - struct damos *s; - int written = 0; - int rc; - - damon_for_each_scheme(s, c) { - rc = scnprintf(&buf[written], len - written, - "%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", - s->pattern.min_sz_region, - s->pattern.max_sz_region, - s->pattern.min_nr_accesses, - s->pattern.max_nr_accesses, - s->pattern.min_age_region, - s->pattern.max_age_region, - damos_action_to_dbgfs_scheme_action(s->action), - s->quota.ms, s->quota.sz, - s->quota.reset_interval, - s->quota.weight_sz, - s->quota.weight_nr_accesses, - s->quota.weight_age, - s->wmarks.metric, s->wmarks.interval, - s->wmarks.high, s->wmarks.mid, s->wmarks.low, - s->stat.nr_tried, s->stat.sz_tried, - s->stat.nr_applied, s->stat.sz_applied, - s->stat.qt_exceeds); - if (!rc) - return -ENOMEM; - - written += rc; - } - return written; -} - -static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t len; - - kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return -ENOMEM; - - mutex_lock(&ctx->kdamond_lock); - len = sprint_schemes(ctx, kbuf, count); - mutex_unlock(&ctx->kdamond_lock); - if (len < 0) - goto out; - len = simple_read_from_buffer(buf, count, ppos, kbuf, len); - -out: - kfree(kbuf); - return len; -} - -static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes) -{ - ssize_t i; - - for (i = 0; i < nr_schemes; i++) - kfree(schemes[i]); - kfree(schemes); -} - -/* - * Return corresponding damos_action for the given dbgfs input for a scheme - * action if the input is valid, negative error code otherwise. - */ -static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action) -{ - switch (dbgfs_action) { - case 0: - return DAMOS_WILLNEED; - case 1: - return DAMOS_COLD; - case 2: - return DAMOS_PAGEOUT; - case 3: - return DAMOS_HUGEPAGE; - case 4: - return DAMOS_NOHUGEPAGE; - case 5: - return DAMOS_STAT; - default: - return -EINVAL; - } -} - -/* - * Converts a string into an array of struct damos pointers - * - * Returns an array of struct damos pointers that converted if the conversion - * success, or NULL otherwise. 
- */
-static struct damos **str_to_schemes(const char *str, ssize_t len,
-		ssize_t *nr_schemes)
-{
-	struct damos *scheme, **schemes;
-	const int max_nr_schemes = 256;
-	int pos = 0, parsed, ret;
-	unsigned int action_input;
-	enum damos_action action;
-
-	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
-			GFP_KERNEL);
-	if (!schemes)
-		return NULL;
-
-	*nr_schemes = 0;
-	while (pos < len && *nr_schemes < max_nr_schemes) {
-		struct damos_access_pattern pattern = {};
-		struct damos_quota quota = {};
-		struct damos_watermarks wmarks;
-
-		ret = sscanf(&str[pos],
-				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
-				&pattern.min_sz_region, &pattern.max_sz_region,
-				&pattern.min_nr_accesses,
-				&pattern.max_nr_accesses,
-				&pattern.min_age_region,
-				&pattern.max_age_region,
-				&action_input, &quota.ms,
-				&quota.sz, &quota.reset_interval,
-				&quota.weight_sz, &quota.weight_nr_accesses,
-				&quota.weight_age, &wmarks.metric,
-				&wmarks.interval, &wmarks.high, &wmarks.mid,
-				&wmarks.low, &parsed);
-		if (ret != 18)
-			break;
-		action = dbgfs_scheme_action_to_damos_action(action_input);
-		if ((int)action < 0)
-			goto fail;
-
-		if (pattern.min_sz_region > pattern.max_sz_region ||
-		    pattern.min_nr_accesses > pattern.max_nr_accesses ||
-		    pattern.min_age_region > pattern.max_age_region)
-			goto fail;
-
-		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
-		    wmarks.mid < wmarks.low)
-			goto fail;
-
-		pos += parsed;
-		scheme = damon_new_scheme(&pattern, action, 0, &quota,
-				&wmarks);
-		if (!scheme)
-			goto fail;
-
-		schemes[*nr_schemes] = scheme;
-		*nr_schemes += 1;
-	}
-	return schemes;
-fail:
-	free_schemes_arr(schemes, *nr_schemes);
-	return NULL;
-}
-
-static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
-		size_t count, loff_t *ppos)
-{
-	struct damon_ctx *ctx = file->private_data;
-	char *kbuf;
-	struct damos **schemes;
-	ssize_t nr_schemes = 0, ret;
-
-	kbuf = user_input_str(buf, count, ppos);
-	if (IS_ERR(kbuf))
-		return PTR_ERR(kbuf);
-
-	schemes = str_to_schemes(kbuf, count, &nr_schemes);
-	if (!schemes) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	mutex_lock(&ctx->kdamond_lock);
-	if (ctx->kdamond) {
-		ret = -EBUSY;
-		goto unlock_out;
-	}
-
-	damon_set_schemes(ctx, schemes, nr_schemes);
-	ret = count;
-	nr_schemes = 0;
-
-unlock_out:
-	mutex_unlock(&ctx->kdamond_lock);
-	free_schemes_arr(schemes, nr_schemes);
-out:
-	kfree(kbuf);
-	return ret;
-}
-
-static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
-{
-	struct damon_target *t;
-	int id;
-	int written = 0;
-	int rc;
-
-	damon_for_each_target(t, ctx) {
-		if (damon_target_has_pid(ctx))
-			/* Show pid numbers to debugfs users */
-			id = pid_vnr(t->pid);
-		else
-			/* Show 42 for physical address space, just for fun */
-			id = 42;
-
-		rc = scnprintf(&buf[written], len - written, "%d ", id);
-		if (!rc)
-			return -ENOMEM;
-		written += rc;
-	}
-	if (written)
-		written -= 1;
-	written += scnprintf(&buf[written], len - written, "\n");
-	return written;
-}
-
-static ssize_t dbgfs_target_ids_read(struct file *file,
-		char __user *buf, size_t count, loff_t *ppos)
-{
-	struct damon_ctx *ctx = file->private_data;
-	ssize_t len;
-	char ids_buf[320];
-
-	mutex_lock(&ctx->kdamond_lock);
-	len = sprint_target_ids(ctx, ids_buf, 320);
-	mutex_unlock(&ctx->kdamond_lock);
-	if (len < 0)
-		return len;
-
-	return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
-}
-
-/*
- * Converts a string into an integers array
- *
- * Returns an array of integers array if the conversion success, or NULL
- * otherwise.
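For reference, the removed str_to_schemes() above expects one scheme per line with 18 space-separated fields, in the order of its sscanf(): the six access-pattern bounds, the action code (0 willneed, 1 cold, 2 pageout, 3 hugepage, 4 nohugepage, 5 stat, per damos_action_to_dbgfs_scheme_action()), the three quota fields, the three quota weights, and the five watermark fields. A hedged userspace sketch with purely illustrative values, assuming debugfs is mounted at /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/*
	 * min_sz max_sz min_nr_accesses max_nr_accesses min_age max_age
	 * action quota_ms quota_sz quota_reset_interval
	 * weight_sz weight_nr_accesses weight_age
	 * wmarks_metric wmarks_interval wmarks_high wmarks_mid wmarks_low
	 *
	 * This asks DAMON to page out (action 2) regions of at least 4 KiB
	 * that were not accessed for 10 or more aggregation intervals, with
	 * no quota and no watermarks.  All numbers are only examples.
	 */
	const char *scheme =
		"4096 18446744073709551615 0 0 10 4294967295 2 "
		"0 0 0 0 0 0 0 0 0 0 0\n";
	int fd = open("/sys/kernel/debug/damon/schemes", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, scheme, strlen(scheme)) != (ssize_t)strlen(scheme))
		perror("write");
	close(fd);
	return 0;
}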
- */ -static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints) -{ - int *array; - const int max_nr_ints = 32; - int nr; - int pos = 0, parsed, ret; - - *nr_ints = 0; - array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL); - if (!array) - return NULL; - while (*nr_ints < max_nr_ints && pos < len) { - ret = sscanf(&str[pos], "%d%n", &nr, &parsed); - pos += parsed; - if (ret != 1) - break; - array[*nr_ints] = nr; - *nr_ints += 1; - } - - return array; -} - -static void dbgfs_put_pids(struct pid **pids, int nr_pids) -{ - int i; - - for (i = 0; i < nr_pids; i++) - put_pid(pids[i]); -} - -/* - * Converts a string into an struct pid pointers array - * - * Returns an array of struct pid pointers if the conversion success, or NULL - * otherwise. - */ -static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids) -{ - int *ints; - ssize_t nr_ints; - struct pid **pids; - - *nr_pids = 0; - - ints = str_to_ints(str, len, &nr_ints); - if (!ints) - return NULL; - - pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL); - if (!pids) - goto out; - - for (; *nr_pids < nr_ints; (*nr_pids)++) { - pids[*nr_pids] = find_get_pid(ints[*nr_pids]); - if (!pids[*nr_pids]) { - dbgfs_put_pids(pids, *nr_pids); - kfree(ints); - kfree(pids); - return NULL; - } - } - -out: - kfree(ints); - return pids; -} - -/* - * dbgfs_set_targets() - Set monitoring targets. - * @ctx: monitoring context - * @nr_targets: number of targets - * @pids: array of target pids (size is same to @nr_targets) - * - * This function should not be called while the kdamond is running. @pids is - * ignored if the context is not configured to have pid in each target. On - * failure, reference counts of all pids in @pids are decremented. - * - * Return: 0 on success, negative error code otherwise. 
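For reference, the pid-parsing helpers above back the removed 'target_ids' debugfs file: it accepts either a space-separated list of pids, or the literal string "paddr" (handled by dbgfs_target_ids_write() further below) to switch the context to the physical address space. An illustrative userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and that the pids exist:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t len = (ssize_t)strlen(val);

	if (fd < 0)
		return -1;
	if (write(fd, val, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/*
	 * Monitor two (illustrative) processes by pid.  Writing the literal
	 * "paddr\n" instead would make the context monitor the physical
	 * address space.
	 */
	if (write_str("/sys/kernel/debug/damon/target_ids", "1234 5678\n")) {
		perror("target_ids");
		return 1;
	}
	return 0;
}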
- */ -static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets, - struct pid **pids) -{ - ssize_t i; - struct damon_target *t, *next; - - damon_for_each_target_safe(t, next, ctx) { - if (damon_target_has_pid(ctx)) - put_pid(t->pid); - damon_destroy_target(t); - } - - for (i = 0; i < nr_targets; i++) { - t = damon_new_target(); - if (!t) { - damon_for_each_target_safe(t, next, ctx) - damon_destroy_target(t); - if (damon_target_has_pid(ctx)) - dbgfs_put_pids(pids, nr_targets); - return -ENOMEM; - } - if (damon_target_has_pid(ctx)) - t->pid = pids[i]; - damon_add_target(ctx, t); - } - - return 0; -} - -static ssize_t dbgfs_target_ids_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - bool id_is_pid = true; - char *kbuf; - struct pid **target_pids = NULL; - ssize_t nr_targets; - ssize_t ret; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - if (!strncmp(kbuf, "paddr\n", count)) { - id_is_pid = false; - nr_targets = 1; - } - - if (id_is_pid) { - target_pids = str_to_pids(kbuf, count, &nr_targets); - if (!target_pids) { - ret = -ENOMEM; - goto out; - } - } - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - if (id_is_pid) - dbgfs_put_pids(target_pids, nr_targets); - ret = -EBUSY; - goto unlock_out; - } - - /* remove previously set targets */ - dbgfs_set_targets(ctx, 0, NULL); - if (!nr_targets) { - ret = count; - goto unlock_out; - } - - /* Configure the context for the address space type */ - if (id_is_pid) - ret = damon_select_ops(ctx, DAMON_OPS_VADDR); - else - ret = damon_select_ops(ctx, DAMON_OPS_PADDR); - if (ret) - goto unlock_out; - - ret = dbgfs_set_targets(ctx, nr_targets, target_pids); - if (!ret) - ret = count; - -unlock_out: - mutex_unlock(&ctx->kdamond_lock); - kfree(target_pids); -out: - kfree(kbuf); - return ret; -} - -static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len) -{ - struct damon_target *t; - struct damon_region *r; - int target_idx = 0; - int written = 0; - int rc; - - damon_for_each_target(t, c) { - damon_for_each_region(r, t) { - rc = scnprintf(&buf[written], len - written, - "%d %lu %lu\n", - target_idx, r->ar.start, r->ar.end); - if (!rc) - return -ENOMEM; - written += rc; - } - target_idx++; - } - return written; -} - -static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t len; - - kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return -ENOMEM; - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - mutex_unlock(&ctx->kdamond_lock); - len = -EBUSY; - goto out; - } - - len = sprint_init_regions(ctx, kbuf, count); - mutex_unlock(&ctx->kdamond_lock); - if (len < 0) - goto out; - len = simple_read_from_buffer(buf, count, ppos, kbuf, len); - -out: - kfree(kbuf); - return len; -} - -static int add_init_region(struct damon_ctx *c, int target_idx, - struct damon_addr_range *ar) -{ - struct damon_target *t; - struct damon_region *r, *prev; - unsigned long idx = 0; - int rc = -EINVAL; - - if (ar->start >= ar->end) - return -EINVAL; - - damon_for_each_target(t, c) { - if (idx++ == target_idx) { - r = damon_new_region(ar->start, ar->end); - if (!r) - return -ENOMEM; - damon_add_region(r, t); - if (damon_nr_regions(t) > 1) { - prev = damon_prev_region(r); - if (prev->ar.end > r->ar.start) { - damon_destroy_region(r, t); - return -EINVAL; - } - } - rc = 0; - } - } - 
return rc; -} - -static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len) -{ - struct damon_target *t; - struct damon_region *r, *next; - int pos = 0, parsed, ret; - int target_idx; - struct damon_addr_range ar; - int err; - - damon_for_each_target(t, c) { - damon_for_each_region_safe(r, next, t) - damon_destroy_region(r, t); - } - - while (pos < len) { - ret = sscanf(&str[pos], "%d %lu %lu%n", - &target_idx, &ar.start, &ar.end, &parsed); - if (ret != 3) - break; - err = add_init_region(c, target_idx, &ar); - if (err) - goto fail; - pos += parsed; - } - - return 0; - -fail: - damon_for_each_target(t, c) { - damon_for_each_region_safe(r, next, t) - damon_destroy_region(r, t); - } - return err; -} - -static ssize_t dbgfs_init_regions_write(struct file *file, - const char __user *buf, size_t count, - loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t ret = count; - int err; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - ret = -EBUSY; - goto unlock_out; - } - - err = set_init_regions(ctx, kbuf, ret); - if (err) - ret = err; - -unlock_out: - mutex_unlock(&ctx->kdamond_lock); - kfree(kbuf); - return ret; -} - -static ssize_t dbgfs_kdamond_pid_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t len; - - kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return -ENOMEM; - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) - len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid); - else - len = scnprintf(kbuf, count, "none\n"); - mutex_unlock(&ctx->kdamond_lock); - if (!len) - goto out; - len = simple_read_from_buffer(buf, count, ppos, kbuf, len); - -out: - kfree(kbuf); - return len; -} - -static int damon_dbgfs_open(struct inode *inode, struct file *file) -{ - damon_dbgfs_warn_deprecation(); - - file->private_data = inode->i_private; - - return nonseekable_open(inode, file); -} - -static const struct file_operations attrs_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_attrs_read, - .write = dbgfs_attrs_write, -}; - -static const struct file_operations schemes_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_schemes_read, - .write = dbgfs_schemes_write, -}; - -static const struct file_operations target_ids_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_target_ids_read, - .write = dbgfs_target_ids_write, -}; - -static const struct file_operations init_regions_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_init_regions_read, - .write = dbgfs_init_regions_write, -}; - -static const struct file_operations kdamond_pid_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_kdamond_pid_read, -}; - -static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx) -{ - const char * const file_names[] = {"attrs", "schemes", "target_ids", - "init_regions", "kdamond_pid"}; - const struct file_operations *fops[] = {&attrs_fops, &schemes_fops, - &target_ids_fops, &init_regions_fops, &kdamond_pid_fops}; - int i; - - for (i = 0; i < ARRAY_SIZE(file_names); i++) - debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]); -} - -static void dbgfs_before_terminate(struct damon_ctx *ctx) -{ - struct damon_target *t, *next; - - if (!damon_target_has_pid(ctx)) - return; - - mutex_lock(&ctx->kdamond_lock); - damon_for_each_target_safe(t, next, ctx) { - put_pid(t->pid); - damon_destroy_target(t); - } - 
mutex_unlock(&ctx->kdamond_lock); -} - -static struct damon_ctx *dbgfs_new_ctx(void) -{ - struct damon_ctx *ctx; - - ctx = damon_new_ctx(); - if (!ctx) - return NULL; - - if (damon_select_ops(ctx, DAMON_OPS_VADDR) && - damon_select_ops(ctx, DAMON_OPS_PADDR)) { - damon_destroy_ctx(ctx); - return NULL; - } - ctx->callback.before_terminate = dbgfs_before_terminate; - return ctx; -} - -static void dbgfs_destroy_ctx(struct damon_ctx *ctx) -{ - damon_destroy_ctx(ctx); -} - -static ssize_t damon_dbgfs_deprecated_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - static const char kbuf[512] = DAMON_DBGFS_DEPRECATION_NOTICE; - - return simple_read_from_buffer(buf, count, ppos, kbuf, strlen(kbuf)); -} - -/* - * Make a context of @name and create a debugfs directory for it. - * - * This function should be called while holding damon_dbgfs_lock. - * - * Returns 0 on success, negative error code otherwise. - */ -static int dbgfs_mk_context(char *name) -{ - struct dentry *root, **new_dirs, *new_dir; - struct damon_ctx **new_ctxs, *new_ctx; - - if (damon_nr_running_ctxs()) - return -EBUSY; - - new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) * - (dbgfs_nr_ctxs + 1), GFP_KERNEL); - if (!new_ctxs) - return -ENOMEM; - dbgfs_ctxs = new_ctxs; - - new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) * - (dbgfs_nr_ctxs + 1), GFP_KERNEL); - if (!new_dirs) - return -ENOMEM; - dbgfs_dirs = new_dirs; - - root = dbgfs_dirs[0]; - if (!root) - return -ENOENT; - - new_dir = debugfs_create_dir(name, root); - /* Below check is required for a potential duplicated name case */ - if (IS_ERR(new_dir)) - return PTR_ERR(new_dir); - dbgfs_dirs[dbgfs_nr_ctxs] = new_dir; - - new_ctx = dbgfs_new_ctx(); - if (!new_ctx) { - debugfs_remove(new_dir); - dbgfs_dirs[dbgfs_nr_ctxs] = NULL; - return -ENOMEM; - } - - dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx; - dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs], - dbgfs_ctxs[dbgfs_nr_ctxs]); - dbgfs_nr_ctxs++; - - return 0; -} - -static ssize_t dbgfs_mk_context_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - char *kbuf; - char *ctx_name; - ssize_t ret; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - ctx_name = kmalloc(count + 1, GFP_KERNEL); - if (!ctx_name) { - kfree(kbuf); - return -ENOMEM; - } - - /* Trim white space */ - if (sscanf(kbuf, "%s", ctx_name) != 1) { - ret = -EINVAL; - goto out; - } - - mutex_lock(&damon_dbgfs_lock); - ret = dbgfs_mk_context(ctx_name); - if (!ret) - ret = count; - mutex_unlock(&damon_dbgfs_lock); - -out: - kfree(kbuf); - kfree(ctx_name); - return ret; -} - -/* - * Remove a context of @name and its debugfs directory. - * - * This function should be called while holding damon_dbgfs_lock. - * - * Return 0 on success, negative error code otherwise. 
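For reference, dbgfs_mk_context() above creates a named sub-directory carrying its own copy of the context files, and set_init_regions() (earlier in this file) parses one "target_idx start end" triple per line, requiring each target's regions to be given in address order and without overlap. A hedged userspace sketch; the context name, addresses, and the assumption that the new context's target_ids was already written are all illustrative, and debugfs is assumed at /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t len = (ssize_t)strlen(val);

	if (fd < 0)
		return -1;
	if (write(fd, val, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Create a second monitoring context named "ctx1". */
	if (write_str("/sys/kernel/debug/damon/mk_contexts", "ctx1")) {
		perror("mk_contexts");
		return 1;
	}
	/*
	 * Seed target 0 of the new context with two ordered, non-overlapping
	 * regions, one "target_idx start end" triple per line.
	 */
	if (write_str("/sys/kernel/debug/damon/ctx1/init_regions",
			"0 4096 16384\n0 65536 131072\n")) {
		perror("init_regions");
		return 1;
	}
	return 0;
}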
- */ -static int dbgfs_rm_context(char *name) -{ - struct dentry *root, *dir, **new_dirs; - struct inode *inode; - struct damon_ctx **new_ctxs; - int i, j; - int ret = 0; - - if (damon_nr_running_ctxs()) - return -EBUSY; - - root = dbgfs_dirs[0]; - if (!root) - return -ENOENT; - - dir = debugfs_lookup(name, root); - if (!dir) - return -ENOENT; - - inode = d_inode(dir); - if (!S_ISDIR(inode->i_mode)) { - ret = -EINVAL; - goto out_dput; - } - - new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs), - GFP_KERNEL); - if (!new_dirs) { - ret = -ENOMEM; - goto out_dput; - } - - new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs), - GFP_KERNEL); - if (!new_ctxs) { - ret = -ENOMEM; - goto out_new_dirs; - } - - for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) { - if (dbgfs_dirs[i] == dir) { - debugfs_remove(dbgfs_dirs[i]); - dbgfs_destroy_ctx(dbgfs_ctxs[i]); - continue; - } - new_dirs[j] = dbgfs_dirs[i]; - new_ctxs[j++] = dbgfs_ctxs[i]; - } - - kfree(dbgfs_dirs); - kfree(dbgfs_ctxs); - - dbgfs_dirs = new_dirs; - dbgfs_ctxs = new_ctxs; - dbgfs_nr_ctxs--; - - goto out_dput; - -out_new_dirs: - kfree(new_dirs); -out_dput: - dput(dir); - return ret; -} - -static ssize_t dbgfs_rm_context_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - char *kbuf; - ssize_t ret; - char *ctx_name; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - ctx_name = kmalloc(count + 1, GFP_KERNEL); - if (!ctx_name) { - kfree(kbuf); - return -ENOMEM; - } - - /* Trim white space */ - if (sscanf(kbuf, "%s", ctx_name) != 1) { - ret = -EINVAL; - goto out; - } - - mutex_lock(&damon_dbgfs_lock); - ret = dbgfs_rm_context(ctx_name); - if (!ret) - ret = count; - mutex_unlock(&damon_dbgfs_lock); - -out: - kfree(kbuf); - kfree(ctx_name); - return ret; -} - -static ssize_t dbgfs_monitor_on_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - char monitor_on_buf[5]; - bool monitor_on = damon_nr_running_ctxs() != 0; - int len; - - len = scnprintf(monitor_on_buf, 5, monitor_on ? 
"on\n" : "off\n"); - - return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len); -} - -static ssize_t dbgfs_monitor_on_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - ssize_t ret; - char *kbuf; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - /* Remove white space */ - if (sscanf(kbuf, "%s", kbuf) != 1) { - kfree(kbuf); - return -EINVAL; - } - - mutex_lock(&damon_dbgfs_lock); - if (!strncmp(kbuf, "on", count)) { - int i; - - for (i = 0; i < dbgfs_nr_ctxs; i++) { - if (damon_targets_empty(dbgfs_ctxs[i])) { - kfree(kbuf); - mutex_unlock(&damon_dbgfs_lock); - return -EINVAL; - } - } - ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true); - } else if (!strncmp(kbuf, "off", count)) { - ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs); - } else { - ret = -EINVAL; - } - mutex_unlock(&damon_dbgfs_lock); - - if (!ret) - ret = count; - kfree(kbuf); - return ret; -} - -static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file) -{ - damon_dbgfs_warn_deprecation(); - return nonseekable_open(inode, file); -} - -static const struct file_operations deprecated_fops = { - .read = damon_dbgfs_deprecated_read, -}; - -static const struct file_operations mk_contexts_fops = { - .open = damon_dbgfs_static_file_open, - .write = dbgfs_mk_context_write, -}; - -static const struct file_operations rm_contexts_fops = { - .open = damon_dbgfs_static_file_open, - .write = dbgfs_rm_context_write, -}; - -static const struct file_operations monitor_on_fops = { - .open = damon_dbgfs_static_file_open, - .read = dbgfs_monitor_on_read, - .write = dbgfs_monitor_on_write, -}; - -static int __init __damon_dbgfs_init(void) -{ - struct dentry *dbgfs_root; - const char * const file_names[] = {"mk_contexts", "rm_contexts", - "monitor_on_DEPRECATED", "DEPRECATED"}; - const struct file_operations *fops[] = {&mk_contexts_fops, - &rm_contexts_fops, &monitor_on_fops, &deprecated_fops}; - int i; - - dbgfs_root = debugfs_create_dir("damon", NULL); - - for (i = 0; i < ARRAY_SIZE(file_names); i++) - debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL, - fops[i]); - dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]); - - dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL); - if (!dbgfs_dirs) { - debugfs_remove(dbgfs_root); - return -ENOMEM; - } - dbgfs_dirs[0] = dbgfs_root; - - return 0; -} - -/* - * Functions for the initialization - */ - -static int __init damon_dbgfs_init(void) -{ - int rc = -ENOMEM; - - mutex_lock(&damon_dbgfs_lock); - dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL); - if (!dbgfs_ctxs) - goto out; - dbgfs_ctxs[0] = dbgfs_new_ctx(); - if (!dbgfs_ctxs[0]) { - kfree(dbgfs_ctxs); - goto out; - } - dbgfs_nr_ctxs = 1; - - rc = __damon_dbgfs_init(); - if (rc) { - kfree(dbgfs_ctxs[0]); - kfree(dbgfs_ctxs); - pr_err("%s: dbgfs init failed\n", __func__); - } - -out: - mutex_unlock(&damon_dbgfs_lock); - return rc; -} - -module_init(damon_dbgfs_init); - -#include "dbgfs-test.h" diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c index 3de2916a65c3..4af8fd4a390b 100644 --- a/mm/damon/lru_sort.c +++ b/mm/damon/lru_sort.c @@ -163,7 +163,8 @@ static struct damos *damon_lru_sort_new_scheme( /* under the quota. */ "a, /* (De)activate this according to the watermarks. 
 */
-			&damon_lru_sort_wmarks);
+			&damon_lru_sort_wmarks,
+			NUMA_NO_NODE);
 }
 
 /* Create a DAMON-based operation scheme for hot memory regions */
@@ -185,61 +186,48 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
 	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
 }
 
-static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
-		struct damos_quota *src)
-{
-	dst->total_charged_sz = src->total_charged_sz;
-	dst->total_charged_ns = src->total_charged_ns;
-	dst->charged_sz = src->charged_sz;
-	dst->charged_from = src->charged_from;
-	dst->charge_target_from = src->charge_target_from;
-	dst->charge_addr_from = src->charge_addr_from;
-}
-
 static int damon_lru_sort_apply_parameters(void)
 {
-	struct damos *scheme, *hot_scheme, *cold_scheme;
-	struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
+	struct damon_ctx *param_ctx;
+	struct damon_target *param_target;
+	struct damos *hot_scheme, *cold_scheme;
 	unsigned int hot_thres, cold_thres;
-	int err = 0;
+	int err;
 
-	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+	err = damon_modules_new_paddr_ctx_target(&param_ctx, &param_target);
 	if (err)
 		return err;
 
-	damon_for_each_scheme(scheme, ctx) {
-		if (!old_hot_scheme) {
-			old_hot_scheme = scheme;
-			continue;
-		}
-		old_cold_scheme = scheme;
-	}
+	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+	if (err)
+		goto out;
 
+	err = -ENOMEM;
 	hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
 		hot_thres_access_freq / 1000;
 	hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
 	if (!hot_scheme)
-		return -ENOMEM;
-	if (old_hot_scheme)
-		damon_lru_sort_copy_quota_status(&hot_scheme->quota,
-				&old_hot_scheme->quota);
+		goto out;
 
 	cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
 	cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
 	if (!cold_scheme) {
 		damon_destroy_scheme(hot_scheme);
-		return -ENOMEM;
+		goto out;
 	}
-	if (old_cold_scheme)
-		damon_lru_sort_copy_quota_status(&cold_scheme->quota,
-				&old_cold_scheme->quota);
 
-	damon_set_schemes(ctx, &hot_scheme, 1);
-	damon_add_scheme(ctx, cold_scheme);
+	damon_set_schemes(param_ctx, &hot_scheme, 1);
+	damon_add_scheme(param_ctx, cold_scheme);
 
-	return damon_set_region_biggest_system_ram_default(target,
+	err = damon_set_region_biggest_system_ram_default(param_target,
 					&monitor_region_start,
 					&monitor_region_end);
+	if (err)
+		goto out;
+	err = damon_commit_ctx(ctx, param_ctx);
+out:
+	damon_destroy_ctx(param_ctx);
+	return err;
 }
 
 static int damon_lru_sort_turn(bool on)
diff --git a/mm/damon/modules-common.c b/mm/damon/modules-common.c
index 7cf96574cde7..86d58f8c4f63 100644
--- a/mm/damon/modules-common.c
+++ b/mm/damon/modules-common.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Common Primitives for DAMON Modules
+ * Common Code for DAMON Modules
  *
  * Author: SeongJae Park <sj@kernel.org>
  */
diff --git a/mm/damon/modules-common.h b/mm/damon/modules-common.h
index f49cdb417005..f103ad556368 100644
--- a/mm/damon/modules-common.h
+++ b/mm/damon/modules-common.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Common Primitives for DAMON Modules
+ * Common Code for DAMON Modules
  *
  * Author: SeongJae Park <sj@kernel.org>
  */
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index d25d99cb5f2b..b43620fee6bb 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Common Primitives for Data Access Monitoring
+ * Common Code for Data Access Monitoring
  *
  * Author: SeongJae Park
<sj@kernel.org> */ @@ -9,6 +9,8 @@ #include <linux/page_idle.h> #include <linux/pagemap.h> #include <linux/rmap.h> +#include <linux/swap.h> +#include <linux/swapops.h> #include "ops-common.h" @@ -24,7 +26,7 @@ struct folio *damon_get_folio(unsigned long pfn) struct page *page = pfn_to_online_page(pfn); struct folio *folio; - if (!page || PageTail(page)) + if (!page) return NULL; folio = page_folio(page); @@ -39,12 +41,29 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); + pte_t pteval = ptep_get(pte); + struct folio *folio; + bool young = false; + unsigned long pfn; + + if (likely(pte_present(pteval))) + pfn = pte_pfn(pteval); + else + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + folio = damon_get_folio(pfn); if (!folio) return; - if (ptep_clear_young_notify(vma, addr, pte)) + /* + * PFN swap PTEs, such as device-exclusive ones, that actually map pages + * are "old" from a CPU perspective. The MMU notifier takes care of any + * device aspects. + */ + if (likely(pte_present(pteval))) + young |= ptep_test_and_clear_young(vma, addr, pte); + young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE); + if (young) folio_set_young(folio); folio_set_idle(folio); diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h index 18d837d11bce..cc9f5da9c012 100644 --- a/mm/damon/ops-common.h +++ b/mm/damon/ops-common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Common Primitives for Data Access Monitoring + * Common Code for Data Access Monitoring * * Author: SeongJae Park <sj@kernel.org> */ diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 5e6dc312072c..4102a8c5f992 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * DAMON Primitives for The Physical Address Space + * DAMON Code for The Physical Address Space * * Author: SeongJae Park <sj@kernel.org> */ @@ -12,12 +12,15 @@ #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> +#include <linux/memory-tiers.h> +#include <linux/migrate.h> +#include <linux/mm_inline.h> #include "../internal.h" #include "ops-common.h" -static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma, - unsigned long addr, void *arg) +static bool damon_folio_mkold_one(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr, void *arg) { DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); @@ -31,33 +34,38 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma, return true; } -static void damon_pa_mkold(unsigned long paddr) +static void damon_folio_mkold(struct folio *folio) { - struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); struct rmap_walk_control rwc = { - .rmap_one = __damon_pa_mkold, + .rmap_one = damon_folio_mkold_one, .anon_lock = folio_lock_anon_vma_read, }; bool need_lock; - if (!folio) - return; - if (!folio_mapped(folio) || !folio_raw_mapping(folio)) { folio_set_idle(folio); - goto out; + return; } need_lock = !folio_test_anon(folio) || folio_test_ksm(folio); if (need_lock && !folio_trylock(folio)) - goto out; + return; rmap_walk(folio, &rwc); if (need_lock) folio_unlock(folio); -out: +} + +static void damon_pa_mkold(unsigned long paddr) +{ + struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); + + if (!folio) + return; + + damon_folio_mkold(folio); folio_put(folio); } @@ -79,17 +87,25 @@ static void damon_pa_prepare_access_checks(struct 
damon_ctx *ctx) } } -static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma, - unsigned long addr, void *arg) +static bool damon_folio_young_one(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr, void *arg) { bool *accessed = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); + pte_t pte; *accessed = false; while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { - *accessed = pte_young(ptep_get(pvmw.pte)) || + pte = ptep_get(pvmw.pte); + + /* + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are "old" from a CPU perspective. + * The MMU notifier takes care of any device aspects. + */ + *accessed = (pte_present(pte) && pte_young(pte)) || !folio_test_idle(folio) || mmu_notifier_test_young(vma->vm_mm, addr); } else { @@ -111,38 +127,44 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma, return *accessed == false; } -static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz) +static bool damon_folio_young(struct folio *folio) { - struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); bool accessed = false; struct rmap_walk_control rwc = { .arg = &accessed, - .rmap_one = __damon_pa_young, + .rmap_one = damon_folio_young_one, .anon_lock = folio_lock_anon_vma_read, }; bool need_lock; - if (!folio) - return false; - if (!folio_mapped(folio) || !folio_raw_mapping(folio)) { if (folio_test_idle(folio)) - accessed = false; + return false; else - accessed = true; - goto out; + return true; } need_lock = !folio_test_anon(folio) || folio_test_ksm(folio); if (need_lock && !folio_trylock(folio)) - goto out; + return false; rmap_walk(folio, &rwc); if (need_lock) folio_unlock(folio); -out: + return accessed; +} + +static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz) +{ + struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); + bool accessed; + + if (!folio) + return false; + + accessed = damon_folio_young(folio); *folio_sz = folio_size(folio); folio_put(folio); return accessed; @@ -184,16 +206,20 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx) return max_nr_accesses; } -static bool __damos_pa_filter_out(struct damos_filter *filter, +static bool damos_pa_filter_match(struct damos_filter *filter, struct folio *folio) { bool matched = false; struct mem_cgroup *memcg; + size_t folio_sz; switch (filter->type) { case DAMOS_FILTER_TYPE_ANON: matched = folio_test_anon(folio); break; + case DAMOS_FILTER_TYPE_ACTIVE: + matched = folio_test_active(folio); + break; case DAMOS_FILTER_TYPE_MEMCG: rcu_read_lock(); memcg = folio_memcg_check(folio); @@ -203,6 +229,19 @@ static bool __damos_pa_filter_out(struct damos_filter *filter, matched = filter->memcg_id == mem_cgroup_id(memcg); rcu_read_unlock(); break; + case DAMOS_FILTER_TYPE_YOUNG: + matched = damon_folio_young(folio); + if (matched) + damon_folio_mkold(folio); + break; + case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: + folio_sz = folio_size(folio); + matched = filter->sz_range.min <= folio_sz && + folio_sz <= filter->sz_range.max; + break; + case DAMOS_FILTER_TYPE_UNMAPPED: + matched = !folio_mapped(folio) || !folio_raw_mapping(folio); + break; default: break; } @@ -217,26 +256,63 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) { struct damos_filter *filter; - damos_for_each_filter(filter, scheme) { - if (__damos_pa_filter_out(filter, folio)) - return true; + if (scheme->core_filters_allowed) + return false; + + damos_for_each_ops_filter(filter, scheme) { + if 
(damos_pa_filter_match(filter, folio)) + return !filter->allow; + } + return scheme->ops_filters_default_reject; +} + +static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s) +{ + if (!folio) + return true; + if (folio == s->last_applied) { + folio_put(folio); + return true; } return false; } -static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) +static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, + unsigned long *sz_filter_passed) { unsigned long addr, applied; LIST_HEAD(folio_list); + bool install_young_filter = true; + struct damos_filter *filter; + struct folio *folio; - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); + /* check access in page level again by default */ + damos_for_each_ops_filter(filter, s) { + if (filter->type == DAMOS_FILTER_TYPE_YOUNG) { + install_young_filter = false; + break; + } + } + if (install_young_filter) { + filter = damos_new_filter( + DAMOS_FILTER_TYPE_YOUNG, true, false); + if (!filter) + return 0; + damos_add_filter(s, filter); + } - if (!folio) + addr = r->ar.start; + while (addr < r->ar.end) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; + else + *sz_filter_passed += folio_size(folio); folio_clear_referenced(folio); folio_test_clear_young(folio); @@ -247,26 +323,36 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) else list_add(&folio->lru, &folio_list); put_folio: + addr += folio_size(folio); folio_put(folio); } - applied = reclaim_pages(&folio_list, false); + if (install_young_filter) + damos_destroy_filter(filter); + applied = reclaim_pages(&folio_list); cond_resched(); + s->last_applied = folio; return applied * PAGE_SIZE; } static inline unsigned long damon_pa_mark_accessed_or_deactivate( - struct damon_region *r, struct damos *s, bool mark_accessed) + struct damon_region *r, struct damos *s, bool mark_accessed, + unsigned long *sz_filter_passed) { unsigned long addr, applied = 0; + struct folio *folio; - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) + addr = r->ar.start; + while (addr < r->ar.end) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; + else + *sz_filter_passed += folio_size(folio); if (mark_accessed) folio_mark_accessed(folio); @@ -274,36 +360,232 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( folio_deactivate(folio); applied += folio_nr_pages(folio); put_folio: + addr += folio_size(folio); folio_put(folio); } + s->last_applied = folio; return applied * PAGE_SIZE; } static unsigned long damon_pa_mark_accessed(struct damon_region *r, - struct damos *s) + struct damos *s, unsigned long *sz_filter_passed) { - return damon_pa_mark_accessed_or_deactivate(r, s, true); + return damon_pa_mark_accessed_or_deactivate(r, s, true, + sz_filter_passed); } static unsigned long damon_pa_deactivate_pages(struct damon_region *r, - struct damos *s) + struct damos *s, unsigned long *sz_filter_passed) +{ + return damon_pa_mark_accessed_or_deactivate(r, s, false, + sz_filter_passed); +} + +static unsigned int __damon_pa_migrate_folio_list( + struct list_head *migrate_folios, struct pglist_data *pgdat, + int 
target_nid) { - return damon_pa_mark_accessed_or_deactivate(r, s, false); + unsigned int nr_succeeded = 0; + nodemask_t allowed_mask = NODE_MASK_NONE; + struct migration_target_control mtc = { + /* + * Allocate from 'node', or fail quickly and quietly. + * When this happens, 'page' will likely just be discarded + * instead of migrated. + */ + .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | + __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT, + .nid = target_nid, + .nmask = &allowed_mask + }; + + if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE) + return 0; + + if (list_empty(migrate_folios)) + return 0; + + /* Migration ignores all cpuset and mempolicy settings */ + migrate_pages(migrate_folios, alloc_migrate_folio, NULL, + (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON, + &nr_succeeded); + + return nr_succeeded; +} + +static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list, + struct pglist_data *pgdat, + int target_nid) +{ + unsigned int nr_migrated = 0; + struct folio *folio; + LIST_HEAD(ret_folios); + LIST_HEAD(migrate_folios); + + while (!list_empty(folio_list)) { + struct folio *folio; + + cond_resched(); + + folio = lru_to_folio(folio_list); + list_del(&folio->lru); + + if (!folio_trylock(folio)) + goto keep; + + /* Relocate its contents to another node. */ + list_add(&folio->lru, &migrate_folios); + folio_unlock(folio); + continue; +keep: + list_add(&folio->lru, &ret_folios); + } + /* 'folio_list' is always empty here */ + + /* Migrate folios selected for migration */ + nr_migrated += __damon_pa_migrate_folio_list( + &migrate_folios, pgdat, target_nid); + /* + * Folios that could not be migrated are still in @migrate_folios. Add + * those back on @folio_list + */ + if (!list_empty(&migrate_folios)) + list_splice_init(&migrate_folios, folio_list); + + try_to_unmap_flush(); + + list_splice(&ret_folios, folio_list); + + while (!list_empty(folio_list)) { + folio = lru_to_folio(folio_list); + list_del(&folio->lru); + folio_putback_lru(folio); + } + + return nr_migrated; +} + +static unsigned long damon_pa_migrate_pages(struct list_head *folio_list, + int target_nid) +{ + int nid; + unsigned long nr_migrated = 0; + LIST_HEAD(node_folio_list); + unsigned int noreclaim_flag; + + if (list_empty(folio_list)) + return nr_migrated; + + noreclaim_flag = memalloc_noreclaim_save(); + + nid = folio_nid(lru_to_folio(folio_list)); + do { + struct folio *folio = lru_to_folio(folio_list); + + if (nid == folio_nid(folio)) { + list_move(&folio->lru, &node_folio_list); + continue; + } + + nr_migrated += damon_pa_migrate_folio_list(&node_folio_list, + NODE_DATA(nid), + target_nid); + nid = folio_nid(lru_to_folio(folio_list)); + } while (!list_empty(folio_list)); + + nr_migrated += damon_pa_migrate_folio_list(&node_folio_list, + NODE_DATA(nid), + target_nid); + + memalloc_noreclaim_restore(noreclaim_flag); + + return nr_migrated; +} + +static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, + unsigned long *sz_filter_passed) +{ + unsigned long addr, applied; + LIST_HEAD(folio_list); + struct folio *folio; + + addr = r->ar.start; + while (addr < r->ar.end) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { + addr += PAGE_SIZE; + continue; + } + + if (damos_pa_filter_out(s, folio)) + goto put_folio; + else + *sz_filter_passed += folio_size(folio); + + if (!folio_isolate_lru(folio)) + goto put_folio; + list_add(&folio->lru, &folio_list); +put_folio: + addr += folio_size(folio); + folio_put(folio); + } + applied = 
damon_pa_migrate_pages(&folio_list, s->target_nid); + cond_resched(); + s->last_applied = folio; + return applied * PAGE_SIZE; +} + +static bool damon_pa_scheme_has_filter(struct damos *s) +{ + struct damos_filter *f; + + damos_for_each_ops_filter(f, s) + return true; + return false; +} + +static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s, + unsigned long *sz_filter_passed) +{ + unsigned long addr; + struct folio *folio; + + if (!damon_pa_scheme_has_filter(s)) + return 0; + + addr = r->ar.start; + while (addr < r->ar.end) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { + addr += PAGE_SIZE; + continue; + } + + if (!damos_pa_filter_out(s, folio)) + *sz_filter_passed += folio_size(folio); + addr += folio_size(folio); + folio_put(folio); + } + s->last_applied = folio; + return 0; } static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, - struct damos *scheme) + struct damos *scheme, unsigned long *sz_filter_passed) { switch (scheme->action) { case DAMOS_PAGEOUT: - return damon_pa_pageout(r, scheme); + return damon_pa_pageout(r, scheme, sz_filter_passed); case DAMOS_LRU_PRIO: - return damon_pa_mark_accessed(r, scheme); + return damon_pa_mark_accessed(r, scheme, sz_filter_passed); case DAMOS_LRU_DEPRIO: - return damon_pa_deactivate_pages(r, scheme); + return damon_pa_deactivate_pages(r, scheme, sz_filter_passed); + case DAMOS_MIGRATE_HOT: + case DAMOS_MIGRATE_COLD: + return damon_pa_migrate(r, scheme, sz_filter_passed); case DAMOS_STAT: - break; + return damon_pa_stat(r, scheme, sz_filter_passed); default: /* DAMOS actions that not yet supported by 'paddr'. */ break; @@ -322,6 +604,10 @@ static int damon_pa_scheme_score(struct damon_ctx *context, return damon_hot_score(context, r, scheme); case DAMOS_LRU_DEPRIO: return damon_cold_score(context, r, scheme); + case DAMOS_MIGRATE_HOT: + return damon_hot_score(context, r, scheme); + case DAMOS_MIGRATE_COLD: + return damon_cold_score(context, r, scheme); default: break; } @@ -337,7 +623,6 @@ static int __init damon_pa_initcall(void) .update = NULL, .prepare_access_checks = damon_pa_prepare_access_checks, .check_accesses = damon_pa_check_accesses, - .reset_aggregated = NULL, .target_valid = NULL, .cleanup = NULL, .apply_scheme = damon_pa_apply_scheme, diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c index 9bd341d62b4c..a675150965e0 100644 --- a/mm/damon/reclaim.c +++ b/mm/damon/reclaim.c @@ -177,76 +177,65 @@ static struct damos *damon_reclaim_new_scheme(void) /* under the quota. */ &damon_reclaim_quota, /* (De)activate this according to the watermarks. 
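The DAMOS_MIGRATE_HOT/DAMOS_MIGRATE_COLD handling added to paddr above is steered by the per-scheme target_nid, which the sysfs interface exposes through the 'action' and 'target_nid' files added later in this diff. A hedged userspace sketch, assuming the usual /sys/kernel/mm/damon/admin layout, an already-created scheme directory 0, and an illustrative node id; with the sysfs interface such changes are normally applied by writing 'commit' to the kdamond's 'state' file afterwards:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t len = (ssize_t)strlen(val);

	if (fd < 0)
		return -1;
	if (write(fd, val, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}

#define SCHEME_DIR "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/"

int main(void)
{
	/* Demote cold data to (illustrative) NUMA node 1. */
	if (write_str(SCHEME_DIR "action", "migrate_cold") ||
	    write_str(SCHEME_DIR "target_nid", "1")) {
		perror("configure migrate_cold scheme");
		return 1;
	}
	return 0;
}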
*/ - &damon_reclaim_wmarks); -} - -static void damon_reclaim_copy_quota_status(struct damos_quota *dst, - struct damos_quota *src) -{ - dst->total_charged_sz = src->total_charged_sz; - dst->total_charged_ns = src->total_charged_ns; - dst->charged_sz = src->charged_sz; - dst->charged_from = src->charged_from; - dst->charge_target_from = src->charge_target_from; - dst->charge_addr_from = src->charge_addr_from; - dst->esz_bp = src->esz_bp; + &damon_reclaim_wmarks, + NUMA_NO_NODE); } static int damon_reclaim_apply_parameters(void) { - struct damos *scheme, *old_scheme; + struct damon_ctx *param_ctx; + struct damon_target *param_target; + struct damos *scheme; struct damos_quota_goal *goal; struct damos_filter *filter; - int err = 0; + int err; - err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs); + err = damon_modules_new_paddr_ctx_target(¶m_ctx, ¶m_target); if (err) return err; - /* Will be freed by next 'damon_set_schemes()' below */ + err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs); + if (err) + goto out; + + err = -ENOMEM; scheme = damon_reclaim_new_scheme(); if (!scheme) - return -ENOMEM; - if (!list_empty(&ctx->schemes)) { - damon_for_each_scheme(old_scheme, ctx) - damon_reclaim_copy_quota_status(&scheme->quota, - &old_scheme->quota); - } + goto out; + damon_set_schemes(ctx, &scheme, 1); if (quota_mem_pressure_us) { goal = damos_new_quota_goal(DAMOS_QUOTA_SOME_MEM_PSI_US, quota_mem_pressure_us); - if (!goal) { - damon_destroy_scheme(scheme); - return -ENOMEM; - } + if (!goal) + goto out; damos_add_quota_goal(&scheme->quota, goal); } if (quota_autotune_feedback) { goal = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000); - if (!goal) { - damon_destroy_scheme(scheme); - return -ENOMEM; - } + if (!goal) + goto out; goal->current_value = quota_autotune_feedback; damos_add_quota_goal(&scheme->quota, goal); } if (skip_anon) { - filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true); - if (!filter) { - /* Will be freed by next 'damon_set_schemes()' below */ - damon_destroy_scheme(scheme); - return -ENOMEM; - } + filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false); + if (!filter) + goto out; damos_add_filter(scheme, filter); } - damon_set_schemes(ctx, &scheme, 1); - return damon_set_region_biggest_system_ram_default(target, + err = damon_set_region_biggest_system_ram_default(param_target, &monitor_region_start, &monitor_region_end); + if (err) + goto out; + err = damon_commit_ctx(ctx, param_ctx); +out: + damon_destroy_ctx(param_ctx); + return err; } static int damon_reclaim_turn(bool on) diff --git a/mm/damon/sysfs-common.c b/mm/damon/sysfs-common.c index 70edf45c2174..ffaf285e241a 100644 --- a/mm/damon/sysfs-common.c +++ b/mm/damon/sysfs-common.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Common Primitives for DAMON Sysfs Interface + * Common Code for DAMON Sysfs Interface * * Author: SeongJae Park <sj@kernel.org> */ diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h index a63f51577cff..2099adee11d0 100644 --- a/mm/damon/sysfs-common.h +++ b/mm/damon/sysfs-common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Common Primitives for DAMON Sysfs Interface + * Common Code for DAMON Sysfs Interface * * Author: SeongJae Park <sj@kernel.org> */ @@ -38,26 +38,20 @@ void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes); extern const struct kobj_type damon_sysfs_schemes_ktype; -int damon_sysfs_set_schemes(struct damon_ctx *ctx, +int damon_sysfs_add_schemes(struct damon_ctx *ctx, struct damon_sysfs_schemes *sysfs_schemes); 
void damon_sysfs_schemes_update_stats( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx); -int damon_sysfs_schemes_update_regions_start( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx, bool total_bytes_only); - -void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx); - -bool damos_sysfs_regions_upd_done(void); - -int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx); +void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, + struct damon_ctx *ctx, struct damon_target *t, + struct damon_region *r, struct damos *s, + bool total_bytes_only, unsigned long sz_filter_passed); int damon_sysfs_schemes_clear_regions( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx); + struct damon_sysfs_schemes *sysfs_schemes); int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx); diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 53a90ac678fb..30ae7518ffbf 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -6,6 +6,7 @@ */ #include <linux/slab.h> +#include <linux/numa.h> #include "sysfs-common.h" @@ -18,6 +19,7 @@ struct damon_sysfs_scheme_region { struct damon_addr_range ar; unsigned int nr_accesses; unsigned int age; + unsigned long sz_filter_passed; struct list_head list; }; @@ -73,6 +75,15 @@ static ssize_t age_show(struct kobject *kobj, struct kobj_attribute *attr, return sysfs_emit(buf, "%u\n", region->age); } +static ssize_t sz_filter_passed_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_region *region = container_of(kobj, + struct damon_sysfs_scheme_region, kobj); + + return sysfs_emit(buf, "%lu\n", region->sz_filter_passed); +} + static void damon_sysfs_scheme_region_release(struct kobject *kobj) { struct damon_sysfs_scheme_region *region = container_of(kobj, @@ -94,11 +105,15 @@ static struct kobj_attribute damon_sysfs_scheme_region_nr_accesses_attr = static struct kobj_attribute damon_sysfs_scheme_region_age_attr = __ATTR_RO_MODE(age, 0400); +static struct kobj_attribute damon_sysfs_scheme_region_sz_filter_passed_attr = + __ATTR_RO_MODE(sz_filter_passed, 0400); + static struct attribute *damon_sysfs_scheme_region_attrs[] = { &damon_sysfs_scheme_region_start_attr.attr, &damon_sysfs_scheme_region_end_attr.attr, &damon_sysfs_scheme_region_nr_accesses_attr.attr, &damon_sysfs_scheme_region_age_attr.attr, + &damon_sysfs_scheme_region_sz_filter_passed_attr.attr, NULL, }; ATTRIBUTE_GROUPS(damon_sysfs_scheme_region); @@ -113,55 +128,11 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = { * scheme regions directory */ -/* - * enum damos_sysfs_regions_upd_status - Represent DAMOS tried regions update - * status - * @DAMOS_TRIED_REGIONS_UPD_IDLE: Waiting for next request. - * @DAMOS_TRIED_REGIONS_UPD_STARTED: Update started. - * @DAMOS_TRIED_REGIONS_UPD_FINISHED: Update finished. - * - * Each DAMON-based operation scheme (&struct damos) has its own apply - * interval, and we need to expose the scheme tried regions based on only - * single snapshot. For this, we keep the tried regions update status for each - * scheme. The status becomes 'idle' at the beginning. - * - * Once the tried regions update request is received, the request handling - * start function (damon_sysfs_scheme_update_regions_start()) sets the status - * of all schemes as 'idle' again, and register ->before_damos_apply() - * callback. 
- * - * Then, the first followup ->before_damos_apply() callback - * (damon_sysfs_before_damos_apply()) sets the status 'started'. The first - * ->after_sampling() or ->after_aggregation() callback - * (damon_sysfs_cmd_request_callback()) after the call is called only after - * the scheme is completely applied to the given snapshot. Hence the callback - * knows the situation by showing 'started' status, and sets the status as - * 'finished'. Then, damon_sysfs_before_damos_apply() understands the - * situation by showing the 'finished' status and do nothing. - * - * If DAMOS is not applied to any region due to any reasons including the - * access pattern, the watermarks, the quotas, and the filters, - * ->before_damos_apply() will not be called back. Until the situation is - * changed, the update will not be finished. To avoid this, - * damon_sysfs_after_sampling() set the status as 'finished' if more than two - * apply intervals of the scheme is passed while the state is 'idle'. - * - * Finally, the tried regions request handling finisher function - * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks. - */ -enum damos_sysfs_regions_upd_status { - DAMOS_TRIED_REGIONS_UPD_IDLE, - DAMOS_TRIED_REGIONS_UPD_STARTED, - DAMOS_TRIED_REGIONS_UPD_FINISHED, -}; - struct damon_sysfs_scheme_regions { struct kobject kobj; struct list_head regions_list; int nr_regions; unsigned long total_bytes; - enum damos_sysfs_regions_upd_status upd_status; - unsigned long upd_timeout_jiffies; }; static struct damon_sysfs_scheme_regions * @@ -177,7 +148,6 @@ damon_sysfs_scheme_regions_alloc(void) INIT_LIST_HEAD(®ions->regions_list); regions->nr_regions = 0; regions->total_bytes = 0; - regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE; return regions; } @@ -232,6 +202,7 @@ struct damon_sysfs_stats { unsigned long sz_tried; unsigned long nr_applied; unsigned long sz_applied; + unsigned long sz_ops_filter_passed; unsigned long qt_exceeds; }; @@ -276,6 +247,15 @@ static ssize_t sz_applied_show(struct kobject *kobj, return sysfs_emit(buf, "%lu\n", stats->sz_applied); } +static ssize_t sz_ops_filter_passed_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_stats *stats = container_of(kobj, + struct damon_sysfs_stats, kobj); + + return sysfs_emit(buf, "%lu\n", stats->sz_ops_filter_passed); +} + static ssize_t qt_exceeds_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -302,6 +282,9 @@ static struct kobj_attribute damon_sysfs_stats_nr_applied_attr = static struct kobj_attribute damon_sysfs_stats_sz_applied_attr = __ATTR_RO_MODE(sz_applied, 0400); +static struct kobj_attribute damon_sysfs_stats_sz_ops_filter_passed_attr = + __ATTR_RO_MODE(sz_ops_filter_passed, 0400); + static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr = __ATTR_RO_MODE(qt_exceeds, 0400); @@ -310,6 +293,7 @@ static struct attribute *damon_sysfs_stats_attrs[] = { &damon_sysfs_stats_sz_tried_attr.attr, &damon_sysfs_stats_nr_applied_attr.attr, &damon_sysfs_stats_sz_applied_attr.attr, + &damon_sysfs_stats_sz_ops_filter_passed_attr.attr, &damon_sysfs_stats_qt_exceeds_attr.attr, NULL, }; @@ -325,24 +309,46 @@ static const struct kobj_type damon_sysfs_stats_ktype = { * filter directory */ +/* + * enum damos_sysfs_filter_handle_layer - Layers handling filters of a dir. 
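The new per-scheme sz_ops_filter_passed stat (and the matching per-region sz_filter_passed file) added above report how many bytes of the regions a scheme was applied to passed the operations-layer filters. A hedged userspace sketch for reading it, assuming the usual /sys/kernel/mm/damon/admin sysfs layout, an existing kdamond/context/scheme 0, and that the stats were refreshed beforehand (normally by writing update_schemes_stats to the kdamond's 'state' file):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/mm/damon/admin/kdamonds/0/"
			   "contexts/0/schemes/0/stats/sz_ops_filter_passed";
	unsigned long sz;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lu", &sz) == 1)
		printf("bytes that passed ops-layer filters: %lu\n", sz);
	fclose(f);
	return 0;
}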
+ */ +enum damos_sysfs_filter_handle_layer { + DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH, +}; + struct damon_sysfs_scheme_filter { struct kobject kobj; + enum damos_sysfs_filter_handle_layer handle_layer; enum damos_filter_type type; bool matching; + bool allow; char *memcg_path; struct damon_addr_range addr_range; + struct damon_size_range sz_range; int target_idx; }; -static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(void) +static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc( + enum damos_sysfs_filter_handle_layer layer) { - return kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); + struct damon_sysfs_scheme_filter *filter; + + filter = kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); + if (filter) + filter->handle_layer = layer; + return filter; } /* Should match with enum damos_filter_type */ static const char * const damon_sysfs_scheme_filter_type_strs[] = { "anon", + "active", "memcg", + "young", + "hugepage_size", + "unmapped", "addr", "target", }; @@ -357,6 +363,23 @@ static ssize_t type_show(struct kobject *kobj, damon_sysfs_scheme_filter_type_strs[filter->type]); } +static bool damos_sysfs_scheme_filter_valid_type( + enum damos_sysfs_filter_handle_layer layer, + enum damos_filter_type type) +{ + switch (layer) { + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH: + return true; + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE: + return !damos_filter_for_ops(type); + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS: + return damos_filter_for_ops(type); + default: + break; + } + return false; +} + static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { @@ -368,6 +391,9 @@ static ssize_t type_store(struct kobject *kobj, for (type = 0; type < NR_DAMOS_FILTER_TYPES; type++) { if (sysfs_streq(buf, damon_sysfs_scheme_filter_type_strs[ type])) { + if (!damos_sysfs_scheme_filter_valid_type( + filter->handle_layer, type)) + break; filter->type = type; ret = count; break; @@ -400,6 +426,30 @@ static ssize_t matching_store(struct kobject *kobj, return count; } +static ssize_t allow_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%c\n", filter->allow ? 'Y' : 'N'); +} + +static ssize_t allow_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + bool allow; + int err = kstrtobool(buf, &allow); + + if (err) + return err; + + filter->allow = allow; + return count; +} + static ssize_t memcg_path_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -415,12 +465,14 @@ static ssize_t memcg_path_store(struct kobject *kobj, { struct damon_sysfs_scheme_filter *filter = container_of(kobj, struct damon_sysfs_scheme_filter, kobj); - char *path = kmalloc(sizeof(*path) * (count + 1), GFP_KERNEL); + char *path = kmalloc_array(size_add(count, 1), sizeof(*path), + GFP_KERNEL); if (!path) return -ENOMEM; strscpy(path, buf, count + 1); + kfree(filter->memcg_path); filter->memcg_path = path; return count; } @@ -463,6 +515,44 @@ static ssize_t addr_end_store(struct kobject *kobj, return err ? 
err : count; } +static ssize_t min_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%lu\n", filter->sz_range.min); +} + +static ssize_t min_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + int err = kstrtoul(buf, 0, &filter->sz_range.min); + + return err ? err : count; +} + +static ssize_t max_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%lu\n", filter->sz_range.max); +} + +static ssize_t max_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + int err = kstrtoul(buf, 0, &filter->sz_range.max); + + return err ? err : count; +} + static ssize_t damon_target_idx_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -497,6 +587,9 @@ static struct kobj_attribute damon_sysfs_scheme_filter_type_attr = static struct kobj_attribute damon_sysfs_scheme_filter_matching_attr = __ATTR_RW_MODE(matching, 0600); +static struct kobj_attribute damon_sysfs_scheme_filter_allow_attr = + __ATTR_RW_MODE(allow, 0600); + static struct kobj_attribute damon_sysfs_scheme_filter_memcg_path_attr = __ATTR_RW_MODE(memcg_path, 0600); @@ -506,15 +599,24 @@ static struct kobj_attribute damon_sysfs_scheme_filter_addr_start_attr = static struct kobj_attribute damon_sysfs_scheme_filter_addr_end_attr = __ATTR_RW_MODE(addr_end, 0600); +static struct kobj_attribute damon_sysfs_scheme_filter_min_attr = + __ATTR_RW_MODE(min, 0600); + +static struct kobj_attribute damon_sysfs_scheme_filter_max_attr = + __ATTR_RW_MODE(max, 0600); + static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr = __ATTR_RW_MODE(damon_target_idx, 0600); static struct attribute *damon_sysfs_scheme_filter_attrs[] = { &damon_sysfs_scheme_filter_type_attr.attr, &damon_sysfs_scheme_filter_matching_attr.attr, + &damon_sysfs_scheme_filter_allow_attr.attr, &damon_sysfs_scheme_filter_memcg_path_attr.attr, &damon_sysfs_scheme_filter_addr_start_attr.attr, &damon_sysfs_scheme_filter_addr_end_attr.attr, + &damon_sysfs_scheme_filter_min_attr.attr, + &damon_sysfs_scheme_filter_max_attr.attr, &damon_sysfs_scheme_filter_damon_target_idx_attr.attr, NULL, }; @@ -532,14 +634,20 @@ static const struct kobj_type damon_sysfs_scheme_filter_ktype = { struct damon_sysfs_scheme_filters { struct kobject kobj; + enum damos_sysfs_filter_handle_layer handle_layer; struct damon_sysfs_scheme_filter **filters_arr; int nr; }; static struct damon_sysfs_scheme_filters * -damon_sysfs_scheme_filters_alloc(void) +damon_sysfs_scheme_filters_alloc(enum damos_sysfs_filter_handle_layer layer) { - return kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); + struct damon_sysfs_scheme_filters *filters; + + filters = kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); + if (filters) + filters->handle_layer = layer; + return filters; } static void damon_sysfs_scheme_filters_rm_dirs( @@ -572,7 +680,8 @@ static int damon_sysfs_scheme_filters_add_dirs( filters->filters_arr = filters_arr; for (i = 0; i < nr_filters; i++) { - filter = 
damon_sysfs_scheme_filter_alloc(); + filter = damon_sysfs_scheme_filter_alloc( + filters->handle_layer); if (!filter) { damon_sysfs_scheme_filters_rm_dirs(filters); return -ENOMEM; @@ -829,12 +938,15 @@ struct damos_sysfs_quota_goal { enum damos_quota_goal_metric metric; unsigned long target_value; unsigned long current_value; + int nid; }; -/* This should match with enum damos_action */ +/* This should match with enum damos_quota_goal_metric */ static const char * const damos_sysfs_quota_goal_metric_strs[] = { "user_input", "some_mem_psi_us", + "node_mem_used_bp", + "node_mem_free_bp", }; static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void) @@ -907,6 +1019,28 @@ static ssize_t current_value_store(struct kobject *kobj, return err ? err : count; } +static ssize_t nid_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damos_sysfs_quota_goal *goal = container_of(kobj, struct + damos_sysfs_quota_goal, kobj); + + /* todo: return error if the goal is not using nid */ + + return sysfs_emit(buf, "%d\n", goal->nid); +} + +static ssize_t nid_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damos_sysfs_quota_goal *goal = container_of(kobj, struct + damos_sysfs_quota_goal, kobj); + int err = kstrtoint(buf, 0, &goal->nid); + + /* feed callback should check existence of this file and read value */ + return err ? err : count; +} + static void damos_sysfs_quota_goal_release(struct kobject *kobj) { /* or, notify this release to the feed callback */ @@ -922,10 +1056,14 @@ static struct kobj_attribute damos_sysfs_quota_goal_target_value_attr = static struct kobj_attribute damos_sysfs_quota_goal_current_value_attr = __ATTR_RW_MODE(current_value, 0600); +static struct kobj_attribute damos_sysfs_quota_goal_nid_attr = + __ATTR_RW_MODE(nid, 0600); + static struct attribute *damos_sysfs_quota_goal_attrs[] = { &damos_sysfs_quota_goal_target_metric_attr.attr, &damos_sysfs_quota_goal_target_value_attr.attr, &damos_sysfs_quota_goal_current_value_attr.attr, + &damos_sysfs_quota_goal_nid_attr.attr, NULL, }; ATTRIBUTE_GROUPS(damos_sysfs_quota_goal); @@ -1365,7 +1503,7 @@ static int damon_sysfs_access_pattern_add_range_dir( if (!range) return -ENOMEM; err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype, - &access_pattern->kobj, name); + &access_pattern->kobj, "%s", name); if (err) kobject_put(&range->kobj); else @@ -1441,9 +1579,12 @@ struct damon_sysfs_scheme { unsigned long apply_interval_us; struct damon_sysfs_quotas *quotas; struct damon_sysfs_watermarks *watermarks; + struct damon_sysfs_scheme_filters *core_filters; + struct damon_sysfs_scheme_filters *ops_filters; struct damon_sysfs_scheme_filters *filters; struct damon_sysfs_stats *stats; struct damon_sysfs_scheme_regions *tried_regions; + int target_nid; }; /* This should match with enum damos_action */ @@ -1455,6 +1596,8 @@ static const char * const damon_sysfs_damos_action_strs[] = { "nohugepage", "lru_prio", "lru_deprio", + "migrate_hot", + "migrate_cold", "stat", }; @@ -1469,6 +1612,7 @@ static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc( scheme->kobj = (struct kobject){}; scheme->action = action; scheme->apply_interval_us = apply_interval_us; + scheme->target_nid = NUMA_NO_NODE; return scheme; } @@ -1537,21 +1681,53 @@ static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme) return err; } -static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme) +static int 
damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme, + enum damos_sysfs_filter_handle_layer layer, const char *name, + struct damon_sysfs_scheme_filters **filters_ptr) { struct damon_sysfs_scheme_filters *filters = - damon_sysfs_scheme_filters_alloc(); + damon_sysfs_scheme_filters_alloc(layer); int err; if (!filters) return -ENOMEM; err = kobject_init_and_add(&filters->kobj, &damon_sysfs_scheme_filters_ktype, &scheme->kobj, - "filters"); + "%s", name); if (err) kobject_put(&filters->kobj); else - scheme->filters = filters; + *filters_ptr = filters; + return err; +} + +static int damos_sysfs_set_filter_dirs(struct damon_sysfs_scheme *scheme) +{ + int err; + + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH, "filters", + &scheme->filters); + if (err) + return err; + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE, "core_filters", + &scheme->core_filters); + if (err) + goto put_filters_out; + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS, "ops_filters", + &scheme->ops_filters); + if (err) + goto put_core_filters_out; + return 0; + +put_core_filters_out: + kobject_put(&scheme->core_filters->kobj); + scheme->core_filters = NULL; +put_filters_out: + kobject_put(&scheme->filters->kobj); + scheme->filters = NULL; return err; } @@ -1603,7 +1779,7 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme) err = damon_sysfs_scheme_set_watermarks(scheme); if (err) goto put_quotas_access_pattern_out; - err = damon_sysfs_scheme_set_filters(scheme); + err = damos_sysfs_set_filter_dirs(scheme); if (err) goto put_watermarks_quotas_access_pattern_out; err = damon_sysfs_scheme_set_stats(scheme); @@ -1618,6 +1794,10 @@ put_tried_regions_out: kobject_put(&scheme->tried_regions->kobj); scheme->tried_regions = NULL; put_filters_watermarks_quotas_access_pattern_out: + kobject_put(&scheme->ops_filters->kobj); + scheme->ops_filters = NULL; + kobject_put(&scheme->core_filters->kobj); + scheme->core_filters = NULL; kobject_put(&scheme->filters->kobj); scheme->filters = NULL; put_watermarks_quotas_access_pattern_out: @@ -1641,6 +1821,10 @@ static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme) kobject_put(&scheme->watermarks->kobj); damon_sysfs_scheme_filters_rm_dirs(scheme->filters); kobject_put(&scheme->filters->kobj); + damon_sysfs_scheme_filters_rm_dirs(scheme->core_filters); + kobject_put(&scheme->core_filters->kobj); + damon_sysfs_scheme_filters_rm_dirs(scheme->ops_filters); + kobject_put(&scheme->ops_filters->kobj); kobject_put(&scheme->stats->kobj); damon_sysfs_scheme_regions_rm_dirs(scheme->tried_regions); kobject_put(&scheme->tried_regions->kobj); @@ -1691,6 +1875,28 @@ static ssize_t apply_interval_us_store(struct kobject *kobj, return err ? err : count; } +static ssize_t target_nid_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme *scheme = container_of(kobj, + struct damon_sysfs_scheme, kobj); + + return sysfs_emit(buf, "%d\n", scheme->target_nid); +} + +static ssize_t target_nid_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme *scheme = container_of(kobj, + struct damon_sysfs_scheme, kobj); + int err = 0; + + /* TODO: error handling for target_nid range. */ + err = kstrtoint(buf, 0, &scheme->target_nid); + + return err ? 
err : count; +} + static void damon_sysfs_scheme_release(struct kobject *kobj) { kfree(container_of(kobj, struct damon_sysfs_scheme, kobj)); @@ -1702,9 +1908,13 @@ static struct kobj_attribute damon_sysfs_scheme_action_attr = static struct kobj_attribute damon_sysfs_scheme_apply_interval_us_attr = __ATTR_RW_MODE(apply_interval_us, 0600); +static struct kobj_attribute damon_sysfs_scheme_target_nid_attr = + __ATTR_RW_MODE(target_nid, 0600); + static struct attribute *damon_sysfs_scheme_attrs[] = { &damon_sysfs_scheme_action_attr.attr, &damon_sysfs_scheme_apply_interval_us_attr.attr, + &damon_sysfs_scheme_target_nid_attr.attr, NULL, }; ATTRIBUTE_GROUPS(damon_sysfs_scheme); @@ -1856,7 +2066,7 @@ static int damon_sysfs_memcg_path_to_id(char *memcg_path, unsigned short *id) if (!memcg_path) return -EINVAL; - path = kmalloc(sizeof(*path) * PATH_MAX, GFP_KERNEL); + path = kmalloc_array(PATH_MAX, sizeof(*path), GFP_KERNEL); if (!path) return -ENOMEM; @@ -1876,21 +2086,18 @@ static int damon_sysfs_memcg_path_to_id(char *memcg_path, unsigned short *id) return found ? 0 : -EINVAL; } -static int damon_sysfs_set_scheme_filters(struct damos *scheme, +static int damon_sysfs_add_scheme_filters(struct damos *scheme, struct damon_sysfs_scheme_filters *sysfs_filters) { int i; - struct damos_filter *filter, *next; - - damos_for_each_filter_safe(filter, next, scheme) - damos_destroy_filter(filter); for (i = 0; i < sysfs_filters->nr; i++) { struct damon_sysfs_scheme_filter *sysfs_filter = sysfs_filters->filters_arr[i]; struct damos_filter *filter = damos_new_filter(sysfs_filter->type, - sysfs_filter->matching); + sysfs_filter->matching, + sysfs_filter->allow); int err; if (!filter) @@ -1912,6 +2119,13 @@ static int damon_sysfs_set_scheme_filters(struct damos *scheme, filter->addr_range = sysfs_filter->addr_range; } else if (filter->type == DAMOS_FILTER_TYPE_TARGET) { filter->target_idx = sysfs_filter->target_idx; + } else if (filter->type == DAMOS_FILTER_TYPE_HUGEPAGE_SIZE) { + if (sysfs_filter->sz_range.min > + sysfs_filter->sz_range.max) { + damos_destroy_filter(filter); + return -EINVAL; + } + filter->sz_range = sysfs_filter->sz_range; } damos_add_filter(scheme, filter); @@ -1919,16 +2133,13 @@ static int damon_sysfs_set_scheme_filters(struct damos *scheme, return 0; } -static int damos_sysfs_set_quota_score( +static int damos_sysfs_add_quota_score( struct damos_sysfs_quota_goals *sysfs_goals, struct damos_quota *quota) { - struct damos_quota_goal *goal, *next; + struct damos_quota_goal *goal; int i; - damos_for_each_quota_goal_safe(goal, next, quota) - damos_destroy_quota_goal(goal); - for (i = 0; i < sysfs_goals->nr; i++) { struct damos_sysfs_quota_goal *sysfs_goal = sysfs_goals->goals_arr[i]; @@ -1940,8 +2151,17 @@ static int damos_sysfs_set_quota_score( sysfs_goal->target_value); if (!goal) return -ENOMEM; - if (sysfs_goal->metric == DAMOS_QUOTA_USER_INPUT) + switch (sysfs_goal->metric) { + case DAMOS_QUOTA_USER_INPUT: goal->current_value = sysfs_goal->current_value; + break; + case DAMOS_QUOTA_NODE_MEM_USED_BP: + case DAMOS_QUOTA_NODE_MEM_FREE_BP: + goal->nid = sysfs_goal->nid; + break; + default: + break; + } damos_add_quota_goal(quota, goal); } return 0; @@ -1951,10 +2171,13 @@ int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx) { struct damos *scheme; + struct damos_quota quota = {}; int i = 0; + INIT_LIST_HEAD("a.goals); damon_for_each_scheme(scheme, ctx) { struct damon_sysfs_scheme *sysfs_scheme; + struct damos_quota_goal *g, *g_next; int err; /* user 
could have removed the scheme sysfs dir */ @@ -1962,10 +2185,17 @@ int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes, break; sysfs_scheme = sysfs_schemes->schemes_arr[i]; - err = damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals, - &scheme->quota); + err = damos_sysfs_add_quota_score(sysfs_scheme->quotas->goals, + "a); + if (err) { + damos_for_each_quota_goal_safe(g, g_next, "a) + damos_destroy_quota_goal(g); + return err; + } + err = damos_commit_quota_goals(&scheme->quota, "a); + damos_for_each_quota_goal_safe(g, g_next, "a) + damos_destroy_quota_goal(g); if (err) - /* kdamond will clean up schemes and terminated */ return err; i++; } @@ -2000,8 +2230,6 @@ static struct damos *damon_sysfs_mk_scheme( struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas; struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights; struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks; - struct damon_sysfs_scheme_filters *sysfs_filters = - sysfs_scheme->filters; struct damos *scheme; int err; @@ -2030,84 +2258,41 @@ static struct damos *damon_sysfs_mk_scheme( }; scheme = damon_new_scheme(&pattern, sysfs_scheme->action, - sysfs_scheme->apply_interval_us, "a, &wmarks); + sysfs_scheme->apply_interval_us, "a, &wmarks, + sysfs_scheme->target_nid); if (!scheme) return NULL; - err = damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota); + err = damos_sysfs_add_quota_score(sysfs_quotas->goals, &scheme->quota); if (err) { damon_destroy_scheme(scheme); return NULL; } - err = damon_sysfs_set_scheme_filters(scheme, sysfs_filters); + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->core_filters); if (err) { damon_destroy_scheme(scheme); return NULL; } - return scheme; -} - -static void damon_sysfs_update_scheme(struct damos *scheme, - struct damon_sysfs_scheme *sysfs_scheme) -{ - struct damon_sysfs_access_pattern *access_pattern = - sysfs_scheme->access_pattern; - struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas; - struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights; - struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks; - int err; - - scheme->pattern.min_sz_region = access_pattern->sz->min; - scheme->pattern.max_sz_region = access_pattern->sz->max; - scheme->pattern.min_nr_accesses = access_pattern->nr_accesses->min; - scheme->pattern.max_nr_accesses = access_pattern->nr_accesses->max; - scheme->pattern.min_age_region = access_pattern->age->min; - scheme->pattern.max_age_region = access_pattern->age->max; - - scheme->action = sysfs_scheme->action; - scheme->apply_interval_us = sysfs_scheme->apply_interval_us; - - scheme->quota.ms = sysfs_quotas->ms; - scheme->quota.sz = sysfs_quotas->sz; - scheme->quota.reset_interval = sysfs_quotas->reset_interval_ms; - scheme->quota.weight_sz = sysfs_weights->sz; - scheme->quota.weight_nr_accesses = sysfs_weights->nr_accesses; - scheme->quota.weight_age = sysfs_weights->age; - - err = damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota); + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->ops_filters); if (err) { damon_destroy_scheme(scheme); - return; + return NULL; } - - scheme->wmarks.metric = sysfs_wmarks->metric; - scheme->wmarks.interval = sysfs_wmarks->interval_us; - scheme->wmarks.high = sysfs_wmarks->high; - scheme->wmarks.mid = sysfs_wmarks->mid; - scheme->wmarks.low = sysfs_wmarks->low; - - err = damon_sysfs_set_scheme_filters(scheme, sysfs_scheme->filters); - if (err) + err = damon_sysfs_add_scheme_filters(scheme, 
sysfs_scheme->filters); + if (err) { damon_destroy_scheme(scheme); + return NULL; + } + return scheme; } -int damon_sysfs_set_schemes(struct damon_ctx *ctx, +int damon_sysfs_add_schemes(struct damon_ctx *ctx, struct damon_sysfs_schemes *sysfs_schemes) { - struct damos *scheme, *next; - int i = 0; - - damon_for_each_scheme_safe(scheme, next, ctx) { - if (i < sysfs_schemes->nr) - damon_sysfs_update_scheme(scheme, - sysfs_schemes->schemes_arr[i]); - else - damon_destroy_scheme(scheme); - i++; - } + int i; - for (; i < sysfs_schemes->nr; i++) { + for (i = 0; i < sysfs_schemes->nr; i++) { struct damos *scheme, *next; scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]); @@ -2140,32 +2325,32 @@ void damon_sysfs_schemes_update_stats( sysfs_stats->sz_tried = scheme->stat.sz_tried; sysfs_stats->nr_applied = scheme->stat.nr_applied; sysfs_stats->sz_applied = scheme->stat.sz_applied; + sysfs_stats->sz_ops_filter_passed = + scheme->stat.sz_ops_filter_passed; sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds; } } -/* - * damon_sysfs_schemes that need to update its schemes regions dir. Protected - * by damon_sysfs_lock - */ -static struct damon_sysfs_schemes *damon_sysfs_schemes_for_damos_callback; -static int damon_sysfs_schemes_region_idx; -static bool damos_regions_upd_total_bytes_only; - -/* - * DAMON callback that called before damos apply. While this callback is - * registered, damon_sysfs_lock should be held to ensure the regions - * directories exist. +/** + * damos_sysfs_populate_region_dir() - Populate a schemes tried region dir. + * @sysfs_schemes: Schemes directory to populate regions directory. + * @ctx: Corresponding DAMON context. + * @t: DAMON target of @r. + * @r: DAMON region to populate the directory for. + * @s: Corresponding scheme. + * @total_bytes_only: Whether the request is for bytes update only. + * @sz_filter_passed: Bytes of @r that passed filters of @s. + * + * Called from DAMOS walk callback while holding damon_sysfs_lock. 
*/ -static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx, - struct damon_target *t, struct damon_region *r, - struct damos *s) +void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, + struct damon_ctx *ctx, struct damon_target *t, + struct damon_region *r, struct damos *s, bool total_bytes_only, + unsigned long sz_filter_passed) { struct damos *scheme; struct damon_sysfs_scheme_regions *sysfs_regions; struct damon_sysfs_scheme_region *region; - struct damon_sysfs_schemes *sysfs_schemes = - damon_sysfs_schemes_for_damos_callback; int schemes_idx = 0; damon_for_each_scheme(scheme, ctx) { @@ -2176,152 +2361,39 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx, /* user could have removed the scheme sysfs dir */ if (schemes_idx >= sysfs_schemes->nr) - return 0; + return; sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions; - if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_FINISHED) - return 0; - if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_IDLE) - sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_STARTED; sysfs_regions->total_bytes += r->ar.end - r->ar.start; - if (damos_regions_upd_total_bytes_only) - return 0; + if (total_bytes_only) + return; region = damon_sysfs_scheme_region_alloc(r); if (!region) - return 0; + return; + region->sz_filter_passed = sz_filter_passed; list_add_tail(®ion->list, &sysfs_regions->regions_list); sysfs_regions->nr_regions++; if (kobject_init_and_add(®ion->kobj, &damon_sysfs_scheme_region_ktype, &sysfs_regions->kobj, "%d", - damon_sysfs_schemes_region_idx++)) { + sysfs_regions->nr_regions++)) { kobject_put(®ion->kobj); } - return 0; } -/* - * DAMON callback that called after each accesses sampling. While this - * callback is registered, damon_sysfs_lock should be held to ensure the - * regions directories exist. 
- */ -void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx) +int damon_sysfs_schemes_clear_regions( + struct damon_sysfs_schemes *sysfs_schemes) { - struct damon_sysfs_schemes *sysfs_schemes = - damon_sysfs_schemes_for_damos_callback; - struct damon_sysfs_scheme_regions *sysfs_regions; int i; for (i = 0; i < sysfs_schemes->nr; i++) { - sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions; - if (sysfs_regions->upd_status == - DAMOS_TRIED_REGIONS_UPD_STARTED || - time_after(jiffies, - sysfs_regions->upd_timeout_jiffies)) - sysfs_regions->upd_status = - DAMOS_TRIED_REGIONS_UPD_FINISHED; - } -} - -/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */ -int damon_sysfs_schemes_clear_regions( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx) -{ - struct damos *scheme; - int schemes_idx = 0; - - damon_for_each_scheme(scheme, ctx) { struct damon_sysfs_scheme *sysfs_scheme; - /* user could have removed the scheme sysfs dir */ - if (schemes_idx >= sysfs_schemes->nr) - break; - - sysfs_scheme = sysfs_schemes->schemes_arr[schemes_idx++]; + sysfs_scheme = sysfs_schemes->schemes_arr[i]; damon_sysfs_scheme_regions_rm_dirs( sysfs_scheme->tried_regions); sysfs_scheme->tried_regions->total_bytes = 0; } return 0; } - -static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx) -{ - struct damos *scheme; - int i = 0; - - damon_for_each_scheme(scheme, ctx) { - if (i == n) - return scheme; - i++; - } - return NULL; -} - -static void damos_tried_regions_init_upd_status( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx) -{ - int i; - struct damos *scheme; - struct damon_sysfs_scheme_regions *sysfs_regions; - - for (i = 0; i < sysfs_schemes->nr; i++) { - sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions; - scheme = damos_sysfs_nth_scheme(i, ctx); - if (!scheme) { - sysfs_regions->upd_status = - DAMOS_TRIED_REGIONS_UPD_FINISHED; - continue; - } - sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE; - sysfs_regions->upd_timeout_jiffies = jiffies + - 2 * usecs_to_jiffies(scheme->apply_interval_us ? - scheme->apply_interval_us : - ctx->attrs.aggr_interval); - } -} - -/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */ -int damon_sysfs_schemes_update_regions_start( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx, bool total_bytes_only) -{ - damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx); - damon_sysfs_schemes_for_damos_callback = sysfs_schemes; - damos_tried_regions_init_upd_status(sysfs_schemes, ctx); - damos_regions_upd_total_bytes_only = total_bytes_only; - ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply; - return 0; -} - -bool damos_sysfs_regions_upd_done(void) -{ - struct damon_sysfs_schemes *sysfs_schemes = - damon_sysfs_schemes_for_damos_callback; - struct damon_sysfs_scheme_regions *sysfs_regions; - int i; - - for (i = 0; i < sysfs_schemes->nr; i++) { - sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions; - if (sysfs_regions->upd_status != - DAMOS_TRIED_REGIONS_UPD_FINISHED) - return false; - } - return true; -} - -/* - * Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock. 
Caller - * should unlock damon_sysfs_lock which held before - * damon_sysfs_schemes_update_regions_start() - */ -int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx) -{ - damon_sysfs_schemes_for_damos_callback = NULL; - ctx->callback.before_damos_apply = NULL; - damon_sysfs_schemes_region_idx = 0; - return 0; -} diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 6fee383bc0c5..1af6aff35d84 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -409,6 +409,164 @@ static const struct kobj_type damon_sysfs_targets_ktype = { }; /* + * intervals goal directory + */ + +struct damon_sysfs_intervals_goal { + struct kobject kobj; + unsigned long access_bp; + unsigned long aggrs; + unsigned long min_sample_us; + unsigned long max_sample_us; +}; + +static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc( + unsigned long access_bp, unsigned long aggrs, + unsigned long min_sample_us, unsigned long max_sample_us) +{ + struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal), + GFP_KERNEL); + + if (!goal) + return NULL; + + goal->kobj = (struct kobject){}; + goal->access_bp = access_bp; + goal->aggrs = aggrs; + goal->min_sample_us = min_sample_us; + goal->max_sample_us = max_sample_us; + return goal; +} + +static ssize_t access_bp_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->access_bp); +} + +static ssize_t access_bp_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->access_bp = nr; + return count; +} + +static ssize_t aggrs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->aggrs); +} + +static ssize_t aggrs_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->aggrs = nr; + return count; +} + +static ssize_t min_sample_us_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->min_sample_us); +} + +static ssize_t min_sample_us_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->min_sample_us = nr; + return count; +} + +static ssize_t max_sample_us_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->max_sample_us); +} + +static ssize_t max_sample_us_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = 
container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->max_sample_us = nr; + return count; +} + +static void damon_sysfs_intervals_goal_release(struct kobject *kobj) +{ + kfree(container_of(kobj, struct damon_sysfs_intervals_goal, kobj)); +} + +static struct kobj_attribute damon_sysfs_intervals_goal_access_bp_attr = + __ATTR_RW_MODE(access_bp, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_aggrs_attr = + __ATTR_RW_MODE(aggrs, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_min_sample_us_attr = + __ATTR_RW_MODE(min_sample_us, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_max_sample_us_attr = + __ATTR_RW_MODE(max_sample_us, 0600); + +static struct attribute *damon_sysfs_intervals_goal_attrs[] = { + &damon_sysfs_intervals_goal_access_bp_attr.attr, + &damon_sysfs_intervals_goal_aggrs_attr.attr, + &damon_sysfs_intervals_goal_min_sample_us_attr.attr, + &damon_sysfs_intervals_goal_max_sample_us_attr.attr, + NULL, +}; +ATTRIBUTE_GROUPS(damon_sysfs_intervals_goal); + +static const struct kobj_type damon_sysfs_intervals_goal_ktype = { + .release = damon_sysfs_intervals_goal_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = damon_sysfs_intervals_goal_groups, +}; + +/* * intervals directory */ @@ -417,6 +575,7 @@ struct damon_sysfs_intervals { unsigned long sample_us; unsigned long aggr_us; unsigned long update_us; + struct damon_sysfs_intervals_goal *intervals_goal; }; static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( @@ -436,6 +595,32 @@ static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( return intervals; } +static int damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals *intervals) +{ + struct damon_sysfs_intervals_goal *goal; + int err; + + goal = damon_sysfs_intervals_goal_alloc(0, 0, 0, 0); + if (!goal) + return -ENOMEM; + + err = kobject_init_and_add(&goal->kobj, + &damon_sysfs_intervals_goal_ktype, &intervals->kobj, + "intervals_goal"); + if (err) { + kobject_put(&goal->kobj); + intervals->intervals_goal = NULL; + return err; + } + intervals->intervals_goal = goal; + return 0; +} + +static void damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals *intervals) +{ + kobject_put(&intervals->intervals_goal->kobj); +} + static ssize_t sample_us_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -571,6 +756,9 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs) "intervals"); if (err) goto put_intervals_out; + err = damon_sysfs_intervals_add_dirs(intervals); + if (err) + goto put_intervals_out; attrs->intervals = intervals; nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000); @@ -599,6 +787,7 @@ put_intervals_out: static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs) { kobject_put(&attrs->nr_regions_range->kobj); + damon_sysfs_intervals_rm_dirs(attrs->intervals); kobject_put(&attrs->intervals->kobj); } @@ -1025,6 +1214,11 @@ enum damon_sysfs_cmd { */ DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS, /* + * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring + * intevals. + */ + DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS, + /* * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands. 
*/ NR_DAMON_SYSFS_CMDS, @@ -1041,27 +1235,9 @@ static const char * const damon_sysfs_cmd_strs[] = { "update_schemes_tried_regions", "clear_schemes_tried_regions", "update_schemes_effective_quotas", + "update_tuned_intervals", }; -/* - * struct damon_sysfs_cmd_request - A request to the DAMON callback. - * @cmd: The command that needs to be handled by the callback. - * @kdamond: The kobject wrapper that associated to the kdamond thread. - * - * This structure represents a sysfs command request that need to access some - * DAMON context-internal data. Because DAMON context-internal data can be - * safely accessed from DAMON callbacks without additional synchronization, the - * request will be handled by the DAMON callback. None-``NULL`` @kdamond means - * the request is valid. - */ -struct damon_sysfs_cmd_request { - enum damon_sysfs_cmd cmd; - struct damon_sysfs_kdamond *kdamond; -}; - -/* Current DAMON callback request. Protected by damon_sysfs_lock. */ -static struct damon_sysfs_cmd_request damon_sysfs_cmd_request; - static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -1084,11 +1260,18 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx, struct damon_sysfs_attrs *sys_attrs) { struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals; + struct damon_sysfs_intervals_goal *sys_goal = + sys_intervals->intervals_goal; struct damon_sysfs_ul_range *sys_nr_regions = sys_attrs->nr_regions_range; struct damon_attrs attrs = { .sample_interval = sys_intervals->sample_us, .aggr_interval = sys_intervals->aggr_us, + .intervals_goal = { + .access_bp = sys_goal->access_bp, + .aggrs = sys_goal->aggrs, + .min_sample_us = sys_goal->min_sample_us, + .max_sample_us = sys_goal->max_sample_us}, .ops_update_interval = sys_intervals->update_us, .min_nr_regions = sys_nr_regions->min, .max_nr_regions = sys_nr_regions->max, @@ -1162,72 +1345,16 @@ destroy_targets_out: return err; } -static int damon_sysfs_update_target_pid(struct damon_target *target, int pid) -{ - struct pid *pid_new; - - pid_new = find_get_pid(pid); - if (!pid_new) - return -EINVAL; - - if (pid_new == target->pid) { - put_pid(pid_new); - return 0; - } - - put_pid(target->pid); - target->pid = pid_new; - return 0; -} - -static int damon_sysfs_update_target(struct damon_target *target, - struct damon_ctx *ctx, - struct damon_sysfs_target *sys_target) -{ - int err = 0; - - if (damon_target_has_pid(ctx)) { - err = damon_sysfs_update_target_pid(target, sys_target->pid); - if (err) - return err; - } - - /* - * Do monitoring target region boundary update only if one or more - * regions are set by the user. This is for keeping current monitoring - * target results and range easier, especially for dynamic monitoring - * target regions update ops like 'vaddr'. 
- */ - if (sys_target->regions->nr) - err = damon_sysfs_set_regions(target, sys_target->regions); - return err; -} - -static int damon_sysfs_set_targets(struct damon_ctx *ctx, +static int damon_sysfs_add_targets(struct damon_ctx *ctx, struct damon_sysfs_targets *sysfs_targets) { - struct damon_target *t, *next; - int i = 0, err; + int i, err; /* Multiple physical address space monitoring targets makes no sense */ if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1) return -EINVAL; - damon_for_each_target_safe(t, next, ctx) { - if (i < sysfs_targets->nr) { - err = damon_sysfs_update_target(t, ctx, - sysfs_targets->targets_arr[i]); - if (err) - return err; - } else { - if (damon_target_has_pid(ctx)) - put_pid(t->pid); - damon_destroy_target(t); - } - i++; - } - - for (; i < sysfs_targets->nr; i++) { + for (i = 0; i < sysfs_targets->nr; i++) { struct damon_sysfs_target *st = sysfs_targets->targets_arr[i]; err = damon_sysfs_add_target(st, ctx); @@ -1237,25 +1364,9 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx, return 0; } -static bool damon_sysfs_schemes_regions_updating; - static void damon_sysfs_before_terminate(struct damon_ctx *ctx) { struct damon_target *t, *next; - struct damon_sysfs_kdamond *kdamond; - enum damon_sysfs_cmd cmd; - - /* damon_sysfs_schemes_update_regions_stop() might not yet called */ - kdamond = damon_sysfs_cmd_request.kdamond; - cmd = damon_sysfs_cmd_request.cmd; - if (kdamond && ctx == kdamond->damon_ctx && - (cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS || - cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) && - damon_sysfs_schemes_regions_updating) { - damon_sysfs_schemes_update_regions_stop(ctx); - damon_sysfs_schemes_regions_updating = false; - mutex_unlock(&damon_sysfs_lock); - } if (!damon_target_has_pid(ctx)) return; @@ -1270,57 +1381,24 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx) /* * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files. - * @kdamond: The kobject wrapper that associated to the kdamond thread. + * @data: The kobject wrapper that associated to the kdamond thread. * * This function reads the schemes stats of specific kdamond and update the * related values for sysfs files. This function should be called from DAMON - * callbacks while holding ``damon_syfs_lock``, to safely access the DAMON - * contexts-internal data and DAMON sysfs variables. + * worker thread,to safely access the DAMON contexts-internal data. Caller + * should also ensure holding ``damon_syfs_lock``, and ->damon_ctx of @data is + * not NULL but a valid pointer, to safely access DAMON sysfs variables. 
*/ -static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond) +static int damon_sysfs_upd_schemes_stats(void *data) { + struct damon_sysfs_kdamond *kdamond = data; struct damon_ctx *ctx = kdamond->damon_ctx; - if (!ctx) - return -EINVAL; damon_sysfs_schemes_update_stats( kdamond->contexts->contexts_arr[0]->schemes, ctx); return 0; } -static int damon_sysfs_upd_schemes_regions_start( - struct damon_sysfs_kdamond *kdamond, bool total_bytes_only) -{ - struct damon_ctx *ctx = kdamond->damon_ctx; - - if (!ctx) - return -EINVAL; - return damon_sysfs_schemes_update_regions_start( - kdamond->contexts->contexts_arr[0]->schemes, ctx, - total_bytes_only); -} - -static int damon_sysfs_upd_schemes_regions_stop( - struct damon_sysfs_kdamond *kdamond) -{ - struct damon_ctx *ctx = kdamond->damon_ctx; - - if (!ctx) - return -EINVAL; - return damon_sysfs_schemes_update_regions_stop(ctx); -} - -static int damon_sysfs_clear_schemes_regions( - struct damon_sysfs_kdamond *kdamond) -{ - struct damon_ctx *ctx = kdamond->damon_ctx; - - if (!ctx) - return -EINVAL; - return damon_sysfs_schemes_clear_regions( - kdamond->contexts->contexts_arr[0]->schemes, ctx); -} - static inline bool damon_sysfs_kdamond_running( struct damon_sysfs_kdamond *kdamond) { @@ -1339,33 +1417,53 @@ static int damon_sysfs_apply_inputs(struct damon_ctx *ctx, err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs); if (err) return err; - err = damon_sysfs_set_targets(ctx, sys_ctx->targets); + err = damon_sysfs_add_targets(ctx, sys_ctx->targets); if (err) return err; - return damon_sysfs_set_schemes(ctx, sys_ctx->schemes); + return damon_sysfs_add_schemes(ctx, sys_ctx->schemes); } +static struct damon_ctx *damon_sysfs_build_ctx( + struct damon_sysfs_context *sys_ctx); + /* * damon_sysfs_commit_input() - Commit user inputs to a running kdamond. * @kdamond: The kobject wrapper for the associated kdamond. * - * If the sysfs input is wrong, the kdamond will be terminated. + * Returns error if the sysfs input is wrong. */ -static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond) +static int damon_sysfs_commit_input(void *data) { + struct damon_sysfs_kdamond *kdamond = data; + struct damon_ctx *param_ctx, *test_ctx; + int err; + if (!damon_sysfs_kdamond_running(kdamond)) return -EINVAL; /* TODO: Support multiple contexts per kdamond */ if (kdamond->contexts->nr != 1) return -EINVAL; - return damon_sysfs_apply_inputs(kdamond->damon_ctx, - kdamond->contexts->contexts_arr[0]); + param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]); + if (IS_ERR(param_ctx)) + return PTR_ERR(param_ctx); + test_ctx = damon_new_ctx(); + err = damon_commit_ctx(test_ctx, param_ctx); + if (err) { + damon_sysfs_destroy_targets(test_ctx); + damon_destroy_ctx(test_ctx); + goto out; + } + err = damon_commit_ctx(kdamond->damon_ctx, param_ctx); +out: + damon_sysfs_destroy_targets(param_ctx); + damon_destroy_ctx(param_ctx); + return err; } -static int damon_sysfs_commit_schemes_quota_goals( - struct damon_sysfs_kdamond *sysfs_kdamond) +static int damon_sysfs_commit_schemes_quota_goals(void *data) { + struct damon_sysfs_kdamond *sysfs_kdamond = data; struct damon_ctx *ctx; struct damon_sysfs_context *sysfs_ctx; @@ -1383,128 +1481,33 @@ static int damon_sysfs_commit_schemes_quota_goals( /* * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas * sysfs files. - * @kdamond: The kobject wrapper that associated to the kdamond thread. + * @data: The kobject wrapper that associated to the kdamond thread. 
* * This function reads the schemes' effective quotas of specific kdamond and * update the related values for sysfs files. This function should be called * from DAMON callbacks while holding ``damon_syfs_lock``, to safely access the * DAMON contexts-internal data and DAMON sysfs variables. */ -static int damon_sysfs_upd_schemes_effective_quotas( - struct damon_sysfs_kdamond *kdamond) +static int damon_sysfs_upd_schemes_effective_quotas(void *data) { + struct damon_sysfs_kdamond *kdamond = data; struct damon_ctx *ctx = kdamond->damon_ctx; - if (!ctx) - return -EINVAL; damos_sysfs_update_effective_quotas( kdamond->contexts->contexts_arr[0]->schemes, ctx); return 0; } - -/* - * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests. - * @c: The DAMON context of the callback. - * @active: Whether @c is not deactivated due to watermarks. - * @after_aggr: Whether this is called from after_aggregation() callback. - * - * This function is periodically called back from the kdamond thread for @c. - * Then, it checks if there is a waiting DAMON sysfs request and handles it. - */ -static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, - bool after_aggregation) +static int damon_sysfs_upd_tuned_intervals(void *data) { - struct damon_sysfs_kdamond *kdamond; - bool total_bytes_only = false; - int err = 0; - - /* avoid deadlock due to concurrent state_store('off') */ - if (!damon_sysfs_schemes_regions_updating && - !mutex_trylock(&damon_sysfs_lock)) - return 0; - kdamond = damon_sysfs_cmd_request.kdamond; - if (!kdamond || kdamond->damon_ctx != c) - goto out; - switch (damon_sysfs_cmd_request.cmd) { - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: - err = damon_sysfs_upd_schemes_stats(kdamond); - break; - case DAMON_SYSFS_CMD_COMMIT: - if (!after_aggregation) - goto out; - err = damon_sysfs_commit_input(kdamond); - break; - case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: - err = damon_sysfs_commit_schemes_quota_goals(kdamond); - break; - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: - total_bytes_only = true; - fallthrough; - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: - if (!damon_sysfs_schemes_regions_updating) { - err = damon_sysfs_upd_schemes_regions_start(kdamond, - total_bytes_only); - if (!err) { - damon_sysfs_schemes_regions_updating = true; - goto keep_lock_out; - } - } else { - damos_sysfs_mark_finished_regions_updates(c); - /* - * Continue regions updating if DAMON is till - * active and the update for all schemes is not - * finished. - */ - if (active && !damos_sysfs_regions_upd_done()) - goto keep_lock_out; - err = damon_sysfs_upd_schemes_regions_stop(kdamond); - damon_sysfs_schemes_regions_updating = false; - } - break; - case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: - err = damon_sysfs_clear_schemes_regions(kdamond); - break; - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: - err = damon_sysfs_upd_schemes_effective_quotas(kdamond); - break; - default: - break; - } - /* Mark the request as invalid now. */ - damon_sysfs_cmd_request.kdamond = NULL; -out: - if (!damon_sysfs_schemes_regions_updating) - mutex_unlock(&damon_sysfs_lock); -keep_lock_out: - return err; -} - -static int damon_sysfs_after_wmarks_check(struct damon_ctx *c) -{ - /* - * after_wmarks_check() is called back while the context is deactivated - * by watermarks. 
- */ - return damon_sysfs_cmd_request_callback(c, false, false); -} - -static int damon_sysfs_after_sampling(struct damon_ctx *c) -{ - /* - * after_sampling() is called back only while the context is not - * deactivated by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, true, false); -} + struct damon_sysfs_kdamond *kdamond = data; + struct damon_ctx *ctx = kdamond->damon_ctx; -static int damon_sysfs_after_aggregation(struct damon_ctx *c) -{ - /* - * after_aggregation() is called back only while the context is not - * deactivated by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, true, true); + kdamond->contexts->contexts_arr[0]->attrs->intervals->sample_us = + ctx->attrs.sample_interval; + kdamond->contexts->contexts_arr[0]->attrs->intervals->aggr_us = + ctx->attrs.aggr_interval; + return 0; } static struct damon_ctx *damon_sysfs_build_ctx( @@ -1522,9 +1525,6 @@ static struct damon_ctx *damon_sysfs_build_ctx( return ERR_PTR(err); } - ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check; - ctx->callback.after_sampling = damon_sysfs_after_sampling; - ctx->callback.after_aggregation = damon_sysfs_after_aggregation; ctx->callback.before_terminate = damon_sysfs_before_terminate; return ctx; } @@ -1536,8 +1536,6 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond) if (damon_sysfs_kdamond_running(kdamond)) return -EBUSY; - if (damon_sysfs_cmd_request.kdamond == kdamond) - return -EBUSY; /* TODO: support multiple contexts per kdamond */ if (kdamond->contexts->nr != 1) return -EINVAL; @@ -1570,63 +1568,102 @@ static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond) */ } +static int damon_sysfs_damon_call(int (*fn)(void *data), + struct damon_sysfs_kdamond *kdamond) +{ + struct damon_call_control call_control = {}; + + if (!kdamond->damon_ctx) + return -EINVAL; + call_control.fn = fn; + call_control.data = kdamond; + return damon_call(kdamond->damon_ctx, &call_control); +} + +struct damon_sysfs_schemes_walk_data { + struct damon_sysfs_kdamond *sysfs_kdamond; + bool total_bytes_only; +}; + +/* populate the region directory */ +static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx, + struct damon_target *t, struct damon_region *r, + struct damos *s, unsigned long sz_filter_passed) +{ + struct damon_sysfs_schemes_walk_data *walk_data = data; + struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond; + + damos_sysfs_populate_region_dir( + sysfs_kdamond->contexts->contexts_arr[0]->schemes, + ctx, t, r, s, walk_data->total_bytes_only, + sz_filter_passed); +} + +static int damon_sysfs_update_schemes_tried_regions( + struct damon_sysfs_kdamond *sysfs_kdamond, bool total_bytes_only) +{ + struct damon_sysfs_schemes_walk_data walk_data = { + .sysfs_kdamond = sysfs_kdamond, + .total_bytes_only = total_bytes_only, + }; + struct damos_walk_control control = { + .walk_fn = damon_sysfs_schemes_tried_regions_upd_one, + .data = &walk_data, + }; + struct damon_ctx *ctx = sysfs_kdamond->damon_ctx; + + if (!ctx) + return -EINVAL; + + damon_sysfs_schemes_clear_regions( + sysfs_kdamond->contexts->contexts_arr[0]->schemes); + return damos_walk(ctx, &control); +} + /* * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond. * @cmd: The command to handle. * @kdamond: The kobject wrapper for the associated kdamond. * - * This function handles a DAMON sysfs command for a kdamond. 
For commands - * that need to access running DAMON context-internal data, it requests - * handling of the command to the DAMON callback - * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled, - * or the context is completed. + * This function handles a DAMON sysfs command for a kdamond. * * Return: 0 on success, negative error code otherwise. */ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, struct damon_sysfs_kdamond *kdamond) { - bool need_wait = true; - - /* Handle commands that doesn't access DAMON context-internal data */ switch (cmd) { case DAMON_SYSFS_CMD_ON: return damon_sysfs_turn_damon_on(kdamond); case DAMON_SYSFS_CMD_OFF: return damon_sysfs_turn_damon_off(kdamond); + case DAMON_SYSFS_CMD_COMMIT: + return damon_sysfs_damon_call( + damon_sysfs_commit_input, kdamond); + case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: + return damon_sysfs_damon_call( + damon_sysfs_commit_schemes_quota_goals, + kdamond); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: + return damon_sysfs_damon_call( + damon_sysfs_upd_schemes_stats, kdamond); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: + return damon_sysfs_update_schemes_tried_regions(kdamond, true); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: + return damon_sysfs_update_schemes_tried_regions(kdamond, false); + case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: + return damon_sysfs_schemes_clear_regions( + kdamond->contexts->contexts_arr[0]->schemes); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: + return damon_sysfs_damon_call( + damon_sysfs_upd_schemes_effective_quotas, + kdamond); + case DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: + return damon_sysfs_damon_call( + damon_sysfs_upd_tuned_intervals, kdamond); default: - break; - } - - /* Pass the command to DAMON callback for safe DAMON context access */ - if (damon_sysfs_cmd_request.kdamond) - return -EBUSY; - if (!damon_sysfs_kdamond_running(kdamond)) return -EINVAL; - damon_sysfs_cmd_request.cmd = cmd; - damon_sysfs_cmd_request.kdamond = kdamond; - - /* - * wait until damon_sysfs_cmd_request_callback() handles the request - * from kdamond context - */ - mutex_unlock(&damon_sysfs_lock); - while (need_wait) { - schedule_timeout_idle(msecs_to_jiffies(100)); - if (!mutex_trylock(&damon_sysfs_lock)) - continue; - if (!damon_sysfs_cmd_request.kdamond) { - /* damon_sysfs_cmd_request_callback() handled */ - need_wait = false; - } else if (!damon_sysfs_kdamond_running(kdamond)) { - /* kdamond has already finished */ - need_wait = false; - damon_sysfs_cmd_request.kdamond = NULL; - } - mutex_unlock(&damon_sysfs_lock); } - mutex_lock(&damon_sysfs_lock); - return 0; } static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, @@ -1738,8 +1775,7 @@ static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds, int i; for (i = 0; i < nr_kdamonds; i++) { - if (damon_sysfs_kdamond_running(kdamonds[i]) || - damon_sysfs_cmd_request.kdamond == kdamonds[i]) + if (damon_sysfs_kdamond_running(kdamonds[i])) return true; } @@ -1927,4 +1963,4 @@ out: } subsys_initcall(damon_sysfs_init); -#include "sysfs-test.h" +#include "tests/sysfs-kunit.h" diff --git a/mm/damon/tests/.kunitconfig b/mm/damon/tests/.kunitconfig new file mode 100644 index 000000000000..36a450f57b58 --- /dev/null +++ b/mm/damon/tests/.kunitconfig @@ -0,0 +1,15 @@ +# for DAMON core +CONFIG_KUNIT=y +CONFIG_DAMON=y +CONFIG_DAMON_KUNIT_TEST=y + +# for DAMON vaddr ops +CONFIG_MMU=y +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_VADDR_KUNIT_TEST=y + +# for 
DAMON sysfs interface +CONFIG_SYSFS=y +CONFIG_DAMON_SYSFS=y +CONFIG_DAMON_SYSFS_KUNIT_TEST=y diff --git a/mm/damon/core-test.h b/mm/damon/tests/core-kunit.h index 0cee634f3544..298c67557fae 100644 --- a/mm/damon/core-test.h +++ b/mm/damon/tests/core-kunit.h @@ -246,16 +246,20 @@ static void damon_test_split_regions_of(struct kunit *test) static void damon_test_ops_registration(struct kunit *test) { struct damon_ctx *c = damon_new_ctx(); - struct damon_operations ops, bak; + struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak; + bool need_cleanup = false; + + /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */ + if (!damon_is_registered_ops(DAMON_OPS_VADDR)) { + bak.id = DAMON_OPS_VADDR; + KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0); + need_cleanup = true; + } - /* DAMON_OPS_{V,P}ADDR are registered on subsys_initcall */ + /* DAMON_OPS_VADDR is ensured to be registered */ KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0); - KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_PADDR), 0); /* Double-registration is prohibited */ - ops.id = DAMON_OPS_VADDR; - KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); - ops.id = DAMON_OPS_PADDR; KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); /* Unknown ops id cannot be registered */ @@ -278,6 +282,13 @@ static void damon_test_ops_registration(struct kunit *test) KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); damon_destroy_ctx(c); + + if (need_cleanup) { + mutex_lock(&damon_ops_lock); + damon_registered_ops[DAMON_OPS_VADDR] = + (struct damon_operations){}; + mutex_unlock(&damon_ops_lock); + } } static void damon_test_set_regions(struct kunit *test) @@ -309,6 +320,18 @@ static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test) .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10 }; + /* + * In some cases such as 32bit architectures where UINT_MAX is + * ULONG_MAX, attrs.aggr_interval becomes zero. Calling + * damon_nr_accesses_to_accesses_bp() in the case will cause + * divide-by-zero. Such case is prohibited in normal execution since + * the caution is documented on the comment for the function, and + * damon_update_monitoring_results() does the check. Skip the test in + * the case. 
+ */ + if (!attrs.aggr_interval) + kunit_skip(test, "aggr_interval is zero."); + KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0); } @@ -325,19 +348,19 @@ static void damon_test_update_monitoring_result(struct kunit *test) new_attrs = (struct damon_attrs){ .sample_interval = 100, .aggr_interval = 10000,}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 15); KUNIT_EXPECT_EQ(test, r->age, 2); new_attrs = (struct damon_attrs){ .sample_interval = 1, .aggr_interval = 1000}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); KUNIT_EXPECT_EQ(test, r->age, 2); new_attrs = (struct damon_attrs){ .sample_interval = 1, .aggr_interval = 100}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); KUNIT_EXPECT_EQ(test, r->age, 20); @@ -388,7 +411,7 @@ static void damos_test_new_filter(struct kunit *test) { struct damos_filter *filter; - filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true); + filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false); KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON); KUNIT_EXPECT_EQ(test, filter->matching, true); KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list); @@ -402,7 +425,7 @@ static void damos_test_filter_out(struct kunit *test) struct damon_region *r, *r2; struct damos_filter *f; - f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true); + f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false); f->addr_range = (struct damon_addr_range){ .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6}; @@ -411,25 +434,25 @@ static void damos_test_filter_out(struct kunit *test) damon_add_region(r, t); /* region in the range */ - KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f)); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); /* region before the range */ r->ar.start = DAMON_MIN_REGION * 1; r->ar.end = DAMON_MIN_REGION * 2; - KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f)); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); /* region after the range */ r->ar.start = DAMON_MIN_REGION * 6; r->ar.end = DAMON_MIN_REGION * 8; - KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f)); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); /* region started before the range */ r->ar.start = DAMON_MIN_REGION * 1; r->ar.end = DAMON_MIN_REGION * 4; - KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f)); /* filter should have split the region */ KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1); KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2); @@ -442,7 +465,7 @@ static void damos_test_filter_out(struct kunit *test) /* region started in the range */ r->ar.start = DAMON_MIN_REGION * 2; r->ar.end = DAMON_MIN_REGION * 8; - KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f)); /* filter should have split the region */ KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2); KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6); @@ -487,6 
+510,75 @@ static void damon_test_feed_loop_next_input(struct kunit *test) damon_feed_loop_next_input(last_input, 2000)); } +static void damon_test_set_filters_default_reject(struct kunit *test) +{ + struct damos scheme; + struct damos_filter *target_filter, *anon_filter; + + INIT_LIST_HEAD(&scheme.filters); + INIT_LIST_HEAD(&scheme.ops_filters); + + damos_set_filters_default_reject(&scheme); + /* + * No filter is installed. Allow by default on both core and ops layer + * filtering stages, since there are no filters at all. + */ + KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); + KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); + + target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true); + damos_add_filter(&scheme, target_filter); + damos_set_filters_default_reject(&scheme); + /* + * A core-handled allow-filter is installed. + * Rejct by default on core layer filtering stage due to the last + * core-layer-filter's behavior. + * Allow by default on ops layer filtering stage due to the absence of + * ops layer filters. + */ + KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true); + KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); + + target_filter->allow = false; + damos_set_filters_default_reject(&scheme); + /* + * A core-handled reject-filter is installed. + * Allow by default on core layer filtering stage due to the last + * core-layer-filter's behavior. + * Allow by default on ops layer filtering stage due to the absence of + * ops layer filters. + */ + KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); + KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); + + anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true); + damos_add_filter(&scheme, anon_filter); + + damos_set_filters_default_reject(&scheme); + /* + * A core-handled reject-filter and ops-handled allow-filter are installed. + * Allow by default on core layer filtering stage due to the existence + * of the ops-handled filter. + * Reject by default on ops layer filtering stage due to the last + * ops-layer-filter's behavior. + */ + KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); + KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true); + + target_filter->allow = true; + damos_set_filters_default_reject(&scheme); + /* + * A core-handled allow-filter and ops-handled allow-filter are + * installed. + * Allow by default on core layer filtering stage due to the existence + * of the ops-handled filter. + * Reject by default on ops layer filtering stage due to the last + * ops-layer-filter's behavior. 
+ */ + KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); + KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true); +} + static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damon_test_target), KUNIT_CASE(damon_test_regions), @@ -504,6 +596,7 @@ static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damos_test_new_filter), KUNIT_CASE(damos_test_filter_out), KUNIT_CASE(damon_test_feed_loop_next_input), + KUNIT_CASE(damon_test_set_filters_default_reject), {}, }; diff --git a/mm/damon/sysfs-test.h b/mm/damon/tests/sysfs-kunit.h index 73bdce2452c1..7b5c7b307da9 100644 --- a/mm/damon/sysfs-test.h +++ b/mm/damon/tests/sysfs-kunit.h @@ -38,7 +38,7 @@ static int __damon_sysfs_test_get_any_pid(int min, int max) return -1; } -static void damon_sysfs_test_set_targets(struct kunit *test) +static void damon_sysfs_test_add_targets(struct kunit *test) { struct damon_sysfs_targets *sysfs_targets; struct damon_sysfs_target *sysfs_target; @@ -56,22 +56,23 @@ static void damon_sysfs_test_set_targets(struct kunit *test) ctx = damon_new_ctx(); - damon_sysfs_set_targets(ctx, sysfs_targets); + damon_sysfs_add_targets(ctx, sysfs_targets); KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx)); sysfs_target->pid = __damon_sysfs_test_get_any_pid( sysfs_target->pid + 1, 200); - damon_sysfs_set_targets(ctx, sysfs_targets); - KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx)); + damon_sysfs_add_targets(ctx, sysfs_targets); + KUNIT_EXPECT_EQ(test, 2u, nr_damon_targets(ctx)); damon_destroy_ctx(ctx); kfree(sysfs_targets->targets_arr); kfree(sysfs_targets); + kfree(sysfs_target->regions); kfree(sysfs_target); } static struct kunit_case damon_sysfs_test_cases[] = { - KUNIT_CASE(damon_sysfs_test_set_targets), + KUNIT_CASE(damon_sysfs_test_add_targets), {}, }; diff --git a/mm/damon/vaddr-test.h b/mm/damon/tests/vaddr-kunit.h index 83626483f82b..7cd944266a92 100644 --- a/mm/damon/vaddr-test.h +++ b/mm/damon/tests/vaddr-kunit.h @@ -66,9 +66,9 @@ failed: static void damon_test_three_regions_in_vmas(struct kunit *test) { static struct mm_struct mm; - struct damon_addr_range regions[3] = {0,}; + struct damon_addr_range regions[3] = {0}; /* 10-20-25, 200-210-220, 300-305, 307-330 */ - struct vm_area_struct vmas[] = { + static struct vm_area_struct vmas[] = { (struct vm_area_struct) {.vm_start = 10, .vm_end = 20}, (struct vm_area_struct) {.vm_start = 20, .vm_end = 25}, (struct vm_area_struct) {.vm_start = 200, .vm_end = 210}, @@ -77,7 +77,7 @@ static void damon_test_three_regions_in_vmas(struct kunit *test) (struct vm_area_struct) {.vm_start = 307, .vm_end = 330}, }; - mt_init_flags(&mm.mm_mt, MM_MT_FLAGS); + mt_init_flags(&mm.mm_mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU); if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas))) kunit_skip(test, "Failed to create VMA tree"); @@ -300,6 +300,8 @@ static void damon_test_split_evenly(struct kunit *test) damon_test_split_evenly_fail(test, 0, 100, 0); damon_test_split_evenly_succ(test, 0, 100, 10); damon_test_split_evenly_succ(test, 5, 59, 5); + damon_test_split_evenly_succ(test, 4, 6, 1); + damon_test_split_evenly_succ(test, 0, 3, 2); damon_test_split_evenly_fail(test, 5, 6, 2); } diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 381559e4a1fa..46554e49a478 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * DAMON Primitives for Virtual Address Spaces + * DAMON Code for Virtual Address Spaces * * Author: SeongJae Park <sj@kernel.org> */ @@ -67,10 +67,14 @@ static int damon_va_evenly_split_region(struct 
damon_target *t, unsigned long sz_orig, sz_piece, orig_end; struct damon_region *n = NULL, *next; unsigned long start; + unsigned int i; if (!r || !nr_pieces) return -EINVAL; + if (nr_pieces == 1) + return 0; + orig_end = r->ar.end; sz_orig = damon_sz_region(r); sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION); @@ -80,8 +84,7 @@ static int damon_va_evenly_split_region(struct damon_target *t, r->ar.end = r->ar.start + sz_piece; next = damon_next_region(r); - for (start = r->ar.end; start + sz_piece <= orig_end; - start += sz_piece) { + for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) { n = damon_new_region(start, start + sz_piece); if (!n) return -ENOMEM; @@ -126,6 +129,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, * If this is too slow, it can be optimised to examine the maple * tree gaps. */ + rcu_read_lock(); for_each_vma(vmi, vma) { unsigned long gap; @@ -146,6 +150,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, next: prev = vma; } + rcu_read_unlock(); if (!sz_range(&second_gap) || !sz_range(&first_gap)) return -EINVAL; @@ -339,7 +344,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) { bool referenced = false; - pte_t entry = huge_ptep_get(pte); + pte_t entry = huge_ptep_get(mm, addr, pte); struct folio *folio = pfn_folio(pte_pfn(entry)); unsigned long psize = huge_page_size(hstate_vma(vma)); @@ -351,11 +356,9 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm, set_huge_pte_at(mm, addr, pte, entry, psize); } -#ifdef CONFIG_MMU_NOTIFIER if (mmu_notifier_clear_young(mm, addr, addr + huge_page_size(hstate_vma(vma)))) referenced = true; -#endif /* CONFIG_MMU_NOTIFIER */ if (referenced) folio_set_young(folio); @@ -373,7 +376,7 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask, pte_t entry; ptl = huge_pte_lock(h, walk->mm, pte); - entry = huge_ptep_get(pte); + entry = huge_ptep_get(walk->mm, addr, pte); if (!pte_present(entry)) goto out; @@ -509,7 +512,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask, pte_t entry; ptl = huge_pte_lock(h, walk->mm, pte); - entry = huge_ptep_get(pte); + entry = huge_ptep_get(walk->mm, addr, pte); if (!pte_present(entry)) goto out; @@ -652,7 +655,7 @@ static unsigned long damos_madvise(struct damon_target *target, static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, - struct damos *scheme) + struct damos *scheme, unsigned long *sz_filter_passed) { int madv_action; @@ -707,7 +710,6 @@ static int __init damon_va_initcall(void) .update = damon_va_update, .prepare_access_checks = damon_va_prepare_access_checks, .check_accesses = damon_va_check_accesses, - .reset_aggregated = NULL, .target_valid = damon_va_target_valid, .cleanup = NULL, .apply_scheme = damon_va_apply_scheme, @@ -730,4 +732,4 @@ static int __init damon_va_initcall(void) subsys_initcall(damon_va_initcall); -#include "vaddr-test.h" +#include "tests/vaddr-kunit.h" |
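
The sysfs-schemes.c hunks above split the per-scheme "filters" directory into layer-specific "core_filters" and "ops_filters" directories and add "allow", "min" and "max" files per filter. As a minimal user-space sketch, assuming the usual /sys/kernel/mm/damon/admin layout with a single kdamond, context and scheme (all index 0) and an otherwise configured scheme, the program below installs one ops-layer filter that lets only anonymous pages pass; the helper, paths and values are illustrative only and not part of the patch.

#include <stdio.h>

/* write one value to a sysfs file, reporting failures */
static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	/* assumed scheme directory; adjust the indices for your setup */
	const char *s =
	    "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0";
	char p[512];

	/* one ops-layer filter: let only anonymous pages pass */
	snprintf(p, sizeof(p), "%s/ops_filters/nr_filters", s);
	write_sysfs(p, "1");
	snprintf(p, sizeof(p), "%s/ops_filters/0/type", s);
	write_sysfs(p, "anon");		/* an ops-handled filter type */
	snprintf(p, sizeof(p), "%s/ops_filters/0/matching", s);
	write_sysfs(p, "Y");
	snprintf(p, sizeof(p), "%s/ops_filters/0/allow", s);
	write_sysfs(p, "Y");		/* new allow/reject knob */
	return 0;
}

Writing a core-handled type such as "addr" or "target" to an ops_filters entry fails, since type_store() checks the directory's handle layer via damos_sysfs_scheme_filter_valid_type(); the pre-existing "filters" directory keeps accepting both layers.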
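
Two NUMA-related inputs are also added: a per-scheme "target_nid" file used by the new "migrate_hot"/"migrate_cold" actions, and a per-quota-goal "nid" file consumed by the new "node_mem_used_bp"/"node_mem_free_bp" metrics. A self-contained sketch under the same assumptions as above (paths, indices and numeric values are examples, not from the patch):

#include <stdio.h>

/* write one value below the assumed scheme directory */
static void set(const char *file, const char *val)
{
	char path[512];
	FILE *f;

	snprintf(path, sizeof(path),
	    "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/%s",
	    file);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return;
	}
	fprintf(f, "%s", val);
	fclose(f);
}

int main(void)
{
	/* migrate cold regions to NUMA node 1 */
	set("action", "migrate_cold");
	set("target_nid", "1");

	/* auto-tune the quota against node 0 memory pressure:
	 * aim at 99.7% (9970 basis points) used memory on that node
	 */
	set("quotas/goals/nr_goals", "1");
	set("quotas/goals/0/target_metric", "node_mem_used_bp");
	set("quotas/goals/0/target_value", "9970");
	set("quotas/goals/0/nid", "0");
	return 0;
}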
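
Finally, the sysfs.c hunks add an "intervals_goal" directory under monitoring_attrs/intervals and an "update_tuned_intervals" kdamond command that copies the auto-tuned intervals back into the sample_us and aggr_us files. A rough sketch of the intended flow, again with assumed paths and arbitrary goal values ("commit" is the pre-existing command for feeding new inputs to a running kdamond):

#include <stdio.h>

static const char *kd = "/sys/kernel/mm/damon/admin/kdamonds/0"; /* assumed */

/* write one value below the assumed kdamond directory */
static void put(const char *file, const char *val)
{
	char path[512];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", kd, file);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return;
	}
	fprintf(f, "%s", val);
	fclose(f);
}

int main(void)
{
	char path[512], buf[64];
	FILE *f;

	/* intervals auto-tuning goal inputs added by this patch;
	 * the values here are arbitrary examples
	 */
	put("contexts/0/monitoring_attrs/intervals/intervals_goal/access_bp", "400");
	put("contexts/0/monitoring_attrs/intervals/intervals_goal/aggrs", "3");
	put("contexts/0/monitoring_attrs/intervals/intervals_goal/min_sample_us", "5000");
	put("contexts/0/monitoring_attrs/intervals/intervals_goal/max_sample_us", "10000000");
	put("state", "commit");

	/* later: refresh the sysfs files with the tuned intervals and read one back */
	put("state", "update_tuned_intervals");
	snprintf(path, sizeof(path),
	    "%s/contexts/0/monitoring_attrs/intervals/sample_us", kd);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("tuned sample_us: %s", buf);
	if (f)
		fclose(f);
	return 0;
}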