Diffstat (limited to 'mm/damon/core.c')
-rw-r--r--	mm/damon/core.c	204
1 file changed, 145 insertions(+), 59 deletions(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 979b29e16ef4..52a48c9316bc 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -407,6 +407,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
 	scheme->wmarks = *wmarks;
 	scheme->wmarks.activated = true;
 
+	scheme->migrate_dests = (struct damos_migrate_dests){};
 	scheme->target_nid = target_nid;
 
 	return scheme;
@@ -449,6 +450,9 @@ void damon_destroy_scheme(struct damos *s)
 
 	damos_for_each_filter_safe(f, next, s)
 		damos_destroy_filter(f);
+
+	kfree(s->migrate_dests.node_id_arr);
+	kfree(s->migrate_dests.weight_arr);
 	damon_del_scheme(s);
 	damon_free_scheme(s);
 }
@@ -498,8 +502,12 @@ void damon_free_target(struct damon_target *t)
 	kfree(t);
 }
 
-void damon_destroy_target(struct damon_target *t)
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
 {
+
+	if (ctx && ctx->ops.cleanup_target)
+		ctx->ops.cleanup_target(t);
+
 	damon_del_target(t);
 	damon_free_target(t);
 }
@@ -529,7 +537,8 @@ struct damon_ctx *damon_new_ctx(void)
 	ctx->next_ops_update_sis = 0;
 
 	mutex_init(&ctx->kdamond_lock);
-	mutex_init(&ctx->call_control_lock);
+	INIT_LIST_HEAD(&ctx->call_controls);
+	mutex_init(&ctx->call_controls_lock);
 	mutex_init(&ctx->walk_control_lock);
 
 	ctx->attrs.min_nr_regions = 10;
@@ -545,13 +554,8 @@ static void damon_destroy_targets(struct damon_ctx *ctx)
 {
 	struct damon_target *t, *next_t;
 
-	if (ctx->ops.cleanup) {
-		ctx->ops.cleanup(ctx);
-		return;
-	}
-
 	damon_for_each_target_safe(t, next_t, ctx)
-		damon_destroy_target(t);
+		damon_destroy_target(t, ctx);
 }
 
 void damon_destroy_ctx(struct damon_ctx *ctx)
@@ -676,9 +680,7 @@ static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
  * @attrs:	monitoring attributes
  *
  * This function should be called while the kdamond is not running, an access
- * check results aggregation is not ongoing (e.g., from &struct
- * damon_callback->after_aggregation or &struct
- * damon_callback->after_wmarks_check callbacks), or from damon_call().
+ * check results aggregation is not ongoing (e.g., from damon_call()).
 *
 * Every time interval is in micro-seconds.
 *
@@ -754,6 +756,19 @@ static struct damos_quota_goal *damos_nth_quota_goal(
 	return NULL;
 }
 
+static void damos_commit_quota_goal_union(
+		struct damos_quota_goal *dst, struct damos_quota_goal *src)
+{
+	switch (dst->metric) {
+	case DAMOS_QUOTA_NODE_MEM_USED_BP:
+	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
+		dst->nid = src->nid;
+		break;
+	default:
+		break;
+	}
+}
+
 static void damos_commit_quota_goal(
 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
 {
@@ -762,6 +777,7 @@ static void damos_commit_quota_goal(
 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
 		dst->current_value = src->current_value;
 	/* keep last_psi_total as is, since it will be updated in next cycle */
+	damos_commit_quota_goal_union(dst, src);
 }
 
 /**
@@ -774,7 +790,7 @@ static void damos_commit_quota_goal(
 * DAMON contexts, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
- * DAMON callbacks.
+ * damon_call().
 */
 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
 {
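The new damos_commit_quota_goal_union() helper copies only the union members that are meaningful for the destination goal's metric, so committing parameters of one metric kind cannot clobber state used by another. A minimal userspace sketch of the same switch-on-metric commit pattern (struct and field names below are illustrative, not the kernel's):

/* Build: cc -o goal_commit goal_commit.c && ./goal_commit */
#include <stdio.h>

enum metric { USER_INPUT, NODE_MEM_USED_BP, NODE_MEM_FREE_BP };

struct goal {
	enum metric metric;
	unsigned long target_value;
	unsigned long current_value;	/* meaningful only for USER_INPUT */
	int nid;			/* meaningful only for NODE_MEM_*_BP */
};

/* Copy only the members valid for dst's metric, mirroring how
 * damos_commit_quota_goal_union() switches on dst->metric. */
static void commit_goal_union(struct goal *dst, struct goal *src)
{
	switch (dst->metric) {
	case NODE_MEM_USED_BP:
	case NODE_MEM_FREE_BP:
		dst->nid = src->nid;
		break;
	default:
		break;
	}
}

int main(void)
{
	struct goal running = { NODE_MEM_USED_BP, 9000, 0, 0 };
	struct goal updated = { NODE_MEM_USED_BP, 9000, 0, 1 };

	commit_goal_union(&running, &updated);
	printf("committed nid: %d\n", running.nid);	/* prints 1 */
	return 0;
}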
@@ -795,6 +811,7 @@ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
 				src_goal->metric, src_goal->target_value);
 		if (!new_goal)
 			return -ENOMEM;
+		damos_commit_quota_goal_union(new_goal, src_goal);
 		damos_add_quota_goal(dst, new_goal);
 	}
 	return 0;
@@ -939,6 +956,41 @@ static void damos_set_filters_default_reject(struct damos *s)
 	damos_filters_default_reject(&s->ops_filters);
 }
 
+static int damos_commit_dests(struct damos *dst, struct damos *src)
+{
+	struct damos_migrate_dests *dst_dests, *src_dests;
+
+	dst_dests = &dst->migrate_dests;
+	src_dests = &src->migrate_dests;
+
+	if (dst_dests->nr_dests != src_dests->nr_dests) {
+		kfree(dst_dests->node_id_arr);
+		kfree(dst_dests->weight_arr);
+
+		dst_dests->node_id_arr = kmalloc_array(src_dests->nr_dests,
+				sizeof(*dst_dests->node_id_arr), GFP_KERNEL);
+		if (!dst_dests->node_id_arr) {
+			dst_dests->weight_arr = NULL;
+			return -ENOMEM;
+		}
+
+		dst_dests->weight_arr = kmalloc_array(src_dests->nr_dests,
+				sizeof(*dst_dests->weight_arr), GFP_KERNEL);
+		if (!dst_dests->weight_arr) {
+			/* ->node_id_arr will be freed by scheme destruction */
+			return -ENOMEM;
+		}
+	}
+
+	dst_dests->nr_dests = src_dests->nr_dests;
+	for (int i = 0; i < src_dests->nr_dests; i++) {
+		dst_dests->node_id_arr[i] = src_dests->node_id_arr[i];
+		dst_dests->weight_arr[i] = src_dests->weight_arr[i];
+	}
+
+	return 0;
+}
+
 static int damos_commit_filters(struct damos *dst, struct damos *src)
 {
 	int err;
@@ -978,6 +1030,11 @@ static int damos_commit(struct damos *dst, struct damos *src)
 		return err;
 
 	dst->wmarks = src->wmarks;
+	dst->target_nid = src->target_nid;
+
+	err = damos_commit_dests(dst, src);
+	if (err)
+		return err;
 
 	err = damos_commit_filters(dst, src);
 	return err;
@@ -1095,9 +1152,7 @@ static int damon_commit_targets(
 	} else {
 		struct damos *s;
 
-		if (damon_target_has_pid(dst))
-			put_pid(dst_target->pid);
-		damon_destroy_target(dst_target);
+		damon_destroy_target(dst_target, dst);
 		damon_for_each_scheme(s, dst) {
 			if (s->quota.charge_target_from == dst_target) {
 				s->quota.charge_target_from = NULL;
@@ -1116,7 +1171,7 @@ static int damon_commit_targets(
 		err = damon_commit_target(new_target, false,
 				src_target, damon_target_has_pid(src));
 		if (err) {
-			damon_destroy_target(new_target);
+			damon_destroy_target(new_target, NULL);
 			return err;
 		}
 		damon_add_target(dst, new_target);
@@ -1135,7 +1190,7 @@ static int damon_commit_targets(
 * in-place updates.
 *
 * This function should be called from parameters-update safe context, like
- * DAMON callbacks.
+ * damon_call().
 */
 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
 {
@@ -1311,7 +1366,13 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
 	return err;
 }
 
-static bool damon_is_running(struct damon_ctx *ctx)
+/**
+ * damon_is_running() - Returns if a given DAMON context is running.
+ * @ctx:	The DAMON context to see if running.
+ *
+ * Return: true if @ctx is running, false otherwise.
+ */
+bool damon_is_running(struct damon_ctx *ctx)
 {
 	bool running;
 
@@ -1328,8 +1389,9 @@ static bool damon_is_running(struct damon_ctx *ctx)
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument data that respectively passed via &damon_call_control->fn and
- * &damon_call_control->data of @control, and wait until the kdamond finishes
- * handling of the request.
+ * &damon_call_control->data of @control.  Unless &damon_call_control->repeat
+ * of @control is set, wait until the kdamond finishes handling of the
+ * request.  Otherwise, return as soon as the request is made.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
@@ -1341,18 +1403,18 @@ static bool damon_is_running(struct damon_ctx *ctx)
 */
 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
 {
-	init_completion(&control->completion);
+	if (!control->repeat)
+		init_completion(&control->completion);
 	control->canceled = false;
+	INIT_LIST_HEAD(&control->list);
 
-	mutex_lock(&ctx->call_control_lock);
-	if (ctx->call_control) {
-		mutex_unlock(&ctx->call_control_lock);
-		return -EBUSY;
-	}
-	ctx->call_control = control;
-	mutex_unlock(&ctx->call_control_lock);
+	mutex_lock(&ctx->call_controls_lock);
+	list_add_tail(&control->list, &ctx->call_controls);
+	mutex_unlock(&ctx->call_controls_lock);
 	if (!damon_is_running(ctx))
 		return -EINVAL;
+	if (control->repeat)
+		return 0;
 	wait_for_completion(&control->completion);
 	if (control->canceled)
 		return -ECANCELED;
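With the per-context list of call controls, damon_call() keeps its synchronous behavior for ordinary requests but returns immediately when &damon_call_control->repeat is set. The following self-contained userspace model reproduces that split, with a pthread worker standing in for the kdamond and a condition variable standing in for the completion (all names are illustrative):

/* Build: cc -pthread -o call_model call_model.c && ./call_model */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct call_control {
	int (*fn)(void *data);	/* request: function and argument */
	void *data;
	bool repeat;		/* if set, the caller does not wait */
	int return_code;
	pthread_mutex_t lock;	/* lock + cv + flag model a completion */
	pthread_cond_t done_cv;
	bool done;
};

static int say_hello(void *data)
{
	printf("worker: hello, %s\n", (const char *)data);
	return 0;
}

/* Worker-side handling, as kdamond_call() does in the kdamond main loop. */
static void *worker(void *arg)
{
	struct call_control *c = arg;

	c->return_code = c->fn(c->data);
	if (!c->repeat) {	/* complete() only for waiting callers */
		pthread_mutex_lock(&c->lock);
		c->done = true;
		pthread_cond_signal(&c->done_cv);
		pthread_mutex_unlock(&c->lock);
	}
	return NULL;
}

/* Caller side: wait for completion unless ->repeat is set. */
static int submit_call(pthread_t *t, struct call_control *c)
{
	pthread_create(t, NULL, worker, c);
	if (c->repeat)
		return 0;	/* fire-and-forget, like repeat mode */
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->done_cv, &c->lock);
	pthread_mutex_unlock(&c->lock);
	return c->return_code;
}

int main(void)
{
	struct call_control c = {
		.fn = say_hello, .data = "synchronous", .repeat = false,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	printf("caller: got %d\n", submit_call(&t, &c));
	pthread_join(t, NULL);
	return 0;
}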
@@ -1490,6 +1552,7 @@ static void kdamond_tune_intervals(struct damon_ctx *c)
 			new_attrs.sample_interval);
 	new_attrs.aggr_interval = new_attrs.sample_interval *
 		c->attrs.aggr_samples;
+	trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
 	damon_set_attrs(c, &new_attrs);
 }
 
@@ -2010,12 +2073,26 @@ static void damos_set_effective_quota(struct damos_quota *quota)
 	quota->esz = esz;
 }
 
+static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
+		struct damos_quota *quota)
+{
+	unsigned int cidx = 0, sidx = 0;
+	struct damos *siter;
+
+	damon_for_each_scheme(siter, c) {
+		if (siter == s)
+			break;
+		sidx++;
+	}
+	trace_damos_esz(cidx, sidx, quota->esz);
+}
+
 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
 {
 	struct damos_quota *quota = &s->quota;
 	struct damon_target *t;
 	struct damon_region *r;
-	unsigned long cumulated_sz;
+	unsigned long cumulated_sz, cached_esz;
 	unsigned int score, max_score = 0;
 
 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
@@ -2029,7 +2106,11 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
 		quota->total_charged_sz += quota->charged_sz;
 		quota->charged_from = jiffies;
 		quota->charged_sz = 0;
+		if (trace_damos_esz_enabled())
+			cached_esz = quota->esz;
 		damos_set_effective_quota(quota);
+		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
+			damos_trace_esz(c, s, quota);
 	}
 
 	if (!c->ops.get_scheme_score)
@@ -2350,11 +2431,11 @@ static void kdamond_usleep(unsigned long usecs)
 }
 
 /*
- * kdamond_call() - handle damon_call_control.
+ * kdamond_call() - handle damon_call_control objects.
 * @ctx:	The &struct damon_ctx of the kdamond.
 * @cancel:	Whether to cancel the invocation of the function.
 *
- * If there is a &struct damon_call_control request that registered via
+ * If there are &struct damon_call_control requests registered via
 * &damon_call() on @ctx, do or cancel the invocation of the function depending
 * on @cancel.  @cancel is set when the kdamond is already out of the main loop
 * and therefore will be terminated.
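In damos_adjust_quota() above, the trace_damos_esz_enabled() check gates both the esz snapshot and the emission, so the tracepoint costs little while disabled and fires only when the effective quota actually changes. A minimal userspace sketch of that guard pattern, with a plain boolean standing in for the tracepoint's enabled check (all names below are illustrative):

/* Build: cc -o esz_guard esz_guard.c && ./esz_guard */
#include <stdbool.h>
#include <stdio.h>

static bool tracing_on;	/* stands in for trace_damos_esz_enabled() */

static void trace_esz(unsigned long esz)
{
	printf("trace: esz=%lu\n", esz);
}

static unsigned long recompute_esz(unsigned long esz)
{
	return esz / 2;	/* placeholder for damos_set_effective_quota() */
}

static void adjust_quota(unsigned long *esz)
{
	unsigned long cached_esz = 0;

	/* Snapshot only when someone is listening; the guard is cheap. */
	if (tracing_on)
		cached_esz = *esz;
	*esz = recompute_esz(*esz);
	/* Emit only on change, so an idle tracer costs two branches. */
	if (tracing_on && *esz != cached_esz)
		trace_esz(*esz);
}

int main(void)
{
	unsigned long esz = 1024;

	adjust_quota(&esz);	/* tracing off: silent */
	tracing_on = true;
	adjust_quota(&esz);	/* tracing on: prints the new esz */
	return 0;
}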
@@ -2362,23 +2443,37 @@ static void kdamond_usleep(unsigned long usecs)
 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
 {
 	struct damon_call_control *control;
+	LIST_HEAD(repeat_controls);
 	int ret = 0;
 
-	mutex_lock(&ctx->call_control_lock);
-	control = ctx->call_control;
-	mutex_unlock(&ctx->call_control_lock);
-	if (!control)
-		return;
-	if (cancel) {
-		control->canceled = true;
-	} else {
-		ret = control->fn(control->data);
-		control->return_code = ret;
+	while (true) {
+		mutex_lock(&ctx->call_controls_lock);
+		control = list_first_entry_or_null(&ctx->call_controls,
+				struct damon_call_control, list);
+		mutex_unlock(&ctx->call_controls_lock);
+		if (!control)
+			break;
+		if (cancel) {
+			control->canceled = true;
+		} else {
+			ret = control->fn(control->data);
+			control->return_code = ret;
+		}
+		mutex_lock(&ctx->call_controls_lock);
+		list_del(&control->list);
+		mutex_unlock(&ctx->call_controls_lock);
+		if (!control->repeat)
+			complete(&control->completion);
+		else
+			list_add(&control->list, &repeat_controls);
 	}
-	complete(&control->completion);
-	mutex_lock(&ctx->call_control_lock);
-	ctx->call_control = NULL;
-	mutex_unlock(&ctx->call_control_lock);
+	control = list_first_entry_or_null(&repeat_controls,
+			struct damon_call_control, list);
+	if (!control || cancel)
+		return;
+	mutex_lock(&ctx->call_controls_lock);
+	list_add_tail(&control->list, &ctx->call_controls);
+	mutex_unlock(&ctx->call_controls_lock);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -2402,9 +2497,6 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
 
 		kdamond_usleep(min_wait_time);
 
-		if (ctx->callback.after_wmarks_check &&
-				ctx->callback.after_wmarks_check(ctx))
-			break;
 		kdamond_call(ctx, false);
 		damos_walk_cancel(ctx);
 	}
@@ -2461,10 +2553,9 @@ static int kdamond_fn(void *data)
 	while (!kdamond_need_stop(ctx)) {
 		/*
 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
-		 * be changed from after_wmarks_check() or after_aggregation()
-		 * callbacks.  Read the values here, and use those for this
-		 * iteration.  That is, damon_set_attrs() updated new values
-		 * are respected from next iteration.
+		 * be changed from kdamond_call().  Read the values here, and
+		 * use those for this iteration.  That is, damon_set_attrs()
+		 * updated new values are respected from next iteration.
 		 */
 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
@@ -2482,14 +2573,10 @@ static int kdamond_fn(void *data)
 		if (ctx->ops.check_accesses)
 			max_nr_accesses = ctx->ops.check_accesses(ctx);
 
-		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
+		if (ctx->passed_sample_intervals >= next_aggregation_sis)
 			kdamond_merge_regions(ctx,
 					max_nr_accesses / 10,
 					sz_limit);
-			if (ctx->callback.after_aggregation &&
-					ctx->callback.after_aggregation(ctx))
-				break;
-		}
 
 		/*
 		 * do kdamond_call() and kdamond_apply_schemes() after
@@ -2555,8 +2642,6 @@ done:
 			damon_destroy_region(r, t);
 	}
 
-	if (ctx->callback.before_terminate)
-		ctx->callback.before_terminate(ctx);
 	if (ctx->ops.cleanup)
 		ctx->ops.cleanup(ctx);
 	kfree(ctx->regions_score_histogram);
@@ -2575,6 +2660,7 @@ done:
 	if (!nr_running_ctxs && running_exclusive_ctxs)
 		running_exclusive_ctxs = false;
 	mutex_unlock(&damon_lock);
 
+	damon_destroy_targets(ctx);
 	return 0;
 }
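The reworked kdamond_call() drains every queued control, completes the synchronous ones, and puts repeat-mode ones back on the queue for the next main-loop iteration. Below is a self-contained userspace model of that drain-and-requeue loop; it uses BSD sys/queue.h tail queues instead of the kernel's list_head and, for simplicity, re-queues all repeat controls (names are illustrative):

/* Build: cc -o drain drain.c && ./drain */
#include <stdbool.h>
#include <stdio.h>
#include <sys/queue.h>

struct control {
	const char *name;
	bool repeat;
	TAILQ_ENTRY(control) list;
};

TAILQ_HEAD(control_list, control);

/* One iteration of the worker: run every queued control; synchronous
 * ones are completed (dropped), repeat-mode ones go back on the queue. */
static void handle_controls(struct control_list *q)
{
	struct control_list repeat_controls;
	struct control *c;

	TAILQ_INIT(&repeat_controls);
	while ((c = TAILQ_FIRST(q)) != NULL) {
		TAILQ_REMOVE(q, c, list);
		printf("ran %s\n", c->name);
		if (c->repeat)
			TAILQ_INSERT_TAIL(&repeat_controls, c, list);
		/* else: the kernel would complete(&c->completion) here */
	}
	/* Re-queue repeat-mode controls for the next iteration. */
	while ((c = TAILQ_FIRST(&repeat_controls)) != NULL) {
		TAILQ_REMOVE(&repeat_controls, c, list);
		TAILQ_INSERT_TAIL(q, c, list);
	}
}

int main(void)
{
	struct control once = { "once", false }, again = { "again", true };
	struct control_list q;

	TAILQ_INIT(&q);
	TAILQ_INSERT_TAIL(&q, &once, list);
	TAILQ_INSERT_TAIL(&q, &again, list);
	handle_controls(&q);	/* runs both controls */
	handle_controls(&q);	/* runs only "again" */
	return 0;
}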