diff options
Diffstat (limited to 'kernel/sched/cpufreq_schedutil.c')
| -rw-r--r-- | kernel/sched/cpufreq_schedutil.c | 45 | 
1 file changed, 24 insertions, 21 deletions
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 1207c78f85c1..e3211455b203 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -48,7 +48,6 @@ struct sugov_cpu {  	unsigned long		util;  	unsigned long		bw_dl; -	unsigned long		max;  	/* The field below is for single-CPU policies only: */  #ifdef CONFIG_NO_HZ_COMMON @@ -158,7 +157,6 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)  {  	struct rq *rq = cpu_rq(sg_cpu->cpu); -	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);  	sg_cpu->bw_dl = cpu_bw_dl(rq);  	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),  					  FREQUENCY_UTIL, NULL); @@ -238,6 +236,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,   * sugov_iowait_apply() - Apply the IO boost to a CPU.   * @sg_cpu: the sugov data for the cpu to boost   * @time: the update time from the caller + * @max_cap: the max CPU capacity   *   * A CPU running a task which woken up after an IO operation can have its   * utilization boosted to speed up the completion of those IO operations. @@ -251,7 +250,8 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,   * This mechanism is designed to boost high frequently IO waiting tasks, while   * being more conservative on tasks which does sporadic IO operations.   */ -static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time) +static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time, +			       unsigned long max_cap)  {  	unsigned long boost; @@ -280,7 +280,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)  	 * sg_cpu->util is already in capacity scale; convert iowait_boost  	 * into the same scale so we can compare.  	 
*/ -	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT; +	boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;  	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);  	if (sg_cpu->util < boost)  		sg_cpu->util = boost; @@ -310,7 +310,8 @@ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)  }  static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, -					      u64 time, unsigned int flags) +					      u64 time, unsigned long max_cap, +					      unsigned int flags)  {  	sugov_iowait_boost(sg_cpu, time, flags);  	sg_cpu->last_update = time; @@ -321,7 +322,7 @@ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,  		return false;  	sugov_get_util(sg_cpu); -	sugov_iowait_apply(sg_cpu, time); +	sugov_iowait_apply(sg_cpu, time, max_cap);  	return true;  } @@ -332,12 +333,15 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,  	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);  	struct sugov_policy *sg_policy = sg_cpu->sg_policy;  	unsigned int cached_freq = sg_policy->cached_raw_freq; +	unsigned long max_cap;  	unsigned int next_f; -	if (!sugov_update_single_common(sg_cpu, time, flags)) +	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu); + +	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))  		return; -	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max); +	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);  	/*  	 * Do not reduce the frequency if the CPU has not been idle  	 * recently, as the reduction is likely to be premature then. 
@@ -374,6 +378,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,  {  	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);  	unsigned long prev_util = sg_cpu->util; +	unsigned long max_cap;  	/*  	 * Fall back to the "frequency" path if frequency invariance is not @@ -385,7 +390,9 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,  		return;  	} -	if (!sugov_update_single_common(sg_cpu, time, flags)) +	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu); + +	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))  		return;  	/* @@ -399,7 +406,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,  		sg_cpu->util = prev_util;  	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl), -				   map_util_perf(sg_cpu->util), sg_cpu->max); +				   map_util_perf(sg_cpu->util), max_cap);  	sg_cpu->sg_policy->last_freq_update_time = time;  } @@ -408,25 +415,21 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)  {  	struct sugov_policy *sg_policy = sg_cpu->sg_policy;  	struct cpufreq_policy *policy = sg_policy->policy; -	unsigned long util = 0, max = 1; +	unsigned long util = 0, max_cap;  	unsigned int j; +	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu); +  	for_each_cpu(j, policy->cpus) {  		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j); -		unsigned long j_util, j_max;  		sugov_get_util(j_sg_cpu); -		sugov_iowait_apply(j_sg_cpu, time); -		j_util = j_sg_cpu->util; -		j_max = j_sg_cpu->max; +		sugov_iowait_apply(j_sg_cpu, time, max_cap); -		if (j_util * max > j_max * util) { -			util = j_util; -			max = j_max; -		} +		util = max(j_sg_cpu->util, util);  	} -	return get_next_freq(sg_policy, util, max); +	return get_next_freq(sg_policy, util, max_cap);  }  static void @@ -543,7 +546,7 @@ static void sugov_tunables_free(struct kobject *kobj)  	kfree(to_sugov_tunables(attr_set));  } -static struct kobj_type 
sugov_tunables_ktype = { +static const struct kobj_type sugov_tunables_ktype = {  	.default_groups = sugov_groups,  	.sysfs_ops = &governor_sysfs_ops,  	.release = &sugov_tunables_free,  | 
