author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 14:09:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 14:09:46 -0700
commit		0e285e90887bcb248178d55960e1276188c657c9 (patch)
tree		da03ead4af7bce5741c7b5e6f73152b3e9f5f433 /kernel
parent		9410091dd5b4097819fcbb6d63987c51f62c85fd (diff)
parent		a1d2fcfd80bef6cdc5675dcabe25587010720bdb (diff)
Merge tag 'pm-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "This time the majority of changes go to the cpufreq subsystem (and to the intel_pstate driver in particular) and there are some updates in the generic power domains framework, cpuidle, tools and a couple of other places.

  One thing worth mentioning is that the intel_pstate's sysfs interface has been reworked to be more consistent with the general expectations of the cpufreq core and less confusing, hopefully for the better.

  Also, we have a new cpufreq driver for Tegra186 and new hardware support in intel_pstate and the Mediatek cpufreq driver.

  Apart from that, the AnalyzeSuspend utility for system suspend profiling gets a companion called AnalyzeBoot for the analogous profiling of system boot, and they both go into one place under tools/power/pm-graph/.

  The rest is mostly fixes, cleanups and code reorganization.

  Specifics:

   - Rework the intel_pstate driver's sysfs interface to make it more straightforward and more intuitive (Rafael Wysocki).

   - Make intel_pstate support all processors which advertise HWP (hardware-managed P-states) to the kernel in all operation modes and make it use the load-based P-state selection algorithm on a wider range of systems in the active mode (Rafael Wysocki).

   - Add cpufreq driver for Tegra186 (Mikko Perttunen).

   - Add support for Gemini Lake SoCs to intel_pstate (David Box).

   - Add support for MT8176 and MT817x to the Mediatek cpufreq driver and clean up that driver a bit (Daniel Kurtz).

   - Clean up intel_pstate and optimize it slightly (Rafael Wysocki).

   - Update the schedutil cpufreq governor, mostly to fix a couple of issues with it related to specific workloads, and rework its sysfs tunable and initialization a bit (Rafael Wysocki, Viresh Kumar).

   - Fix minor issues in the imx6q, dbx500 and qoriq cpufreq drivers (Christophe Jaillet, Irina Tirdea, Leonard Crestez, Viresh Kumar, YuanTian Tang).

   - Add file patterns for cpufreq DT bindings to MAINTAINERS (Geert Uytterhoeven).

   - Add support for "always on" power domains to the genpd (generic power domains) framework and clean up that code somewhat (Ulf Hansson, Lina Iyer, Viresh Kumar).

   - Fix minor issues in the powernv cpuidle driver and clean it up (Anton Blanchard, Gautham Shenoy).

   - Move the AnalyzeSuspend utility under tools/power/pm-graph/ and add an analogous boot-profiling utility called AnalyzeBoot to it (Todd Brandt).

   - Add rk3328 support to the rockchip-io AVS (Adaptive Voltage Scaling) driver (David Wu).

   - Fix minor issues in the cpuidle core, the intel_pstate_tracer utility, the devfreq framework and the PM core documentation (Chanwoo Choi, Doug Smythies, Johan Hovold, Marcin Nowakowski)"

* tag 'pm-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (56 commits)
  PM / runtime: Document autosuspend-helper side effects
  PM / runtime: Fix autosuspend documentation
  tools: power: pm-graph: Package makefile and man pages
  tools: power: pm-graph: AnalyzeBoot v2.0
  tools: power: pm-graph: AnalyzeSuspend v4.6
  cpufreq: Add Tegra186 cpufreq driver
  cpufreq: imx6q: Fix error handling code
  cpufreq: imx6q: Set max suspend_freq to avoid changes during suspend
  cpufreq: imx6q: Fix handling EPROBE_DEFER from regulator
  cpuidle: powernv: Avoid a branch in the core snooze_loop() loop
  cpuidle: powernv: Don't continually set thread priority in snooze_loop()
  cpuidle: powernv: Don't bounce between low and very low thread priority
  cpuidle: cpuidle-cps: remove unused variable
  tools/power/x86/intel_pstate_tracer: Adjust directory ownership
  cpufreq: schedutil: Use policy-dependent transition delays
  cpufreq: schedutil: Reduce frequencies slower
  PM / devfreq: Move struct devfreq_governor to devfreq directory
  PM / Domains: Ignore domain-idle-states that are not compatible
  cpufreq: intel_pstate: Add support for Gemini Lake
  powernv-cpuidle: Validate DT property array size
  ...
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	82
-rw-r--r--	kernel/time/tick-sched.c	12
2 files changed, 66 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 54c577578da6..76877a62b5fa 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -61,6 +61,11 @@ struct sugov_cpu {
unsigned long util;
unsigned long max;
unsigned int flags;
+
+ /* The field below is for single-CPU policies only. */
+#ifdef CONFIG_NO_HZ_COMMON
+ unsigned long saved_idle_calls;
+#endif
};
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -93,22 +98,23 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
{
struct cpufreq_policy *policy = sg_policy->policy;
+ if (sg_policy->next_freq == next_freq)
+ return;
+
+ if (sg_policy->next_freq > next_freq)
+ next_freq = (sg_policy->next_freq + next_freq) >> 1;
+
+ sg_policy->next_freq = next_freq;
sg_policy->last_freq_update_time = time;
if (policy->fast_switch_enabled) {
- if (sg_policy->next_freq == next_freq) {
- trace_cpu_frequency(policy->cur, smp_processor_id());
- return;
- }
- sg_policy->next_freq = next_freq;
next_freq = cpufreq_driver_fast_switch(policy, next_freq);
if (next_freq == CPUFREQ_ENTRY_INVALID)
return;
policy->cur = next_freq;
trace_cpu_frequency(next_freq, smp_processor_id());
- } else if (sg_policy->next_freq != next_freq) {
- sg_policy->next_freq = next_freq;
+ } else {
sg_policy->work_in_progress = true;
irq_work_queue(&sg_policy->irq_work);
}
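
The rewritten sugov_update_commit() above (from "cpufreq: schedutil: Reduce frequencies slower") never drops straight to a lower target: when the new request is below the currently committed frequency, it commits the midpoint instead, so the frequency decays toward the target by half the remaining gap on each evaluation. A minimal sketch of that rule, with illustrative names and values that are not part of the patch:

#include <stdio.h>

/* Sketch of the midpoint rule in sugov_update_commit(): a requested
 * reduction is softened to halfway between the old and new targets. */
static unsigned int soften_reduction(unsigned int cur, unsigned int req)
{
	return req < cur ? (cur + req) >> 1 : req;
}

int main(void)
{
	unsigned int freq = 2000000;	/* kHz; assume the CPU sits at 2 GHz */

	/* Utilization now asks for 1 GHz; successive evaluations commit
	 * 1500000, 1250000, 1125000, 1062500 kHz instead of jumping down. */
	for (int i = 0; i < 4; i++) {
		freq = soften_reduction(freq, 1000000);
		printf("%u kHz\n", freq);
	}
	return 0;
}
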
@@ -192,6 +198,19 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
sg_cpu->iowait_boost >>= 1;
}
+#ifdef CONFIG_NO_HZ_COMMON
+static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+{
+ unsigned long idle_calls = tick_nohz_get_idle_calls();
+ bool ret = idle_calls == sg_cpu->saved_idle_calls;
+
+ sg_cpu->saved_idle_calls = idle_calls;
+ return ret;
+}
+#else
+static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+#endif /* CONFIG_NO_HZ_COMMON */
+
static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int flags)
{
@@ -200,6 +219,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
struct cpufreq_policy *policy = sg_policy->policy;
unsigned long util, max;
unsigned int next_f;
+ bool busy;
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
@@ -207,40 +227,37 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
if (!sugov_should_update_freq(sg_policy, time))
return;
+ busy = sugov_cpu_is_busy(sg_cpu);
+
if (flags & SCHED_CPUFREQ_RT_DL) {
next_f = policy->cpuinfo.max_freq;
} else {
sugov_get_util(&util, &max);
sugov_iowait_boost(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
+ /*
+ * Do not reduce the frequency if the CPU has not been idle
+ * recently, as the reduction is likely to be premature then.
+ */
+ if (busy && next_f < sg_policy->next_freq)
+ next_f = sg_policy->next_freq;
}
sugov_update_commit(sg_policy, time, next_f);
}
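
Taken together, sugov_cpu_is_busy() and the guard above implement a simple "don't slow a busy CPU" heuristic: if the per-CPU idle-calls counter has not advanced since the last governor run, the CPU never entered idle in between, so a proposed frequency reduction is held back for one more interval. A standalone sketch of the pattern, with a stand-in counter in place of tick_nohz_get_idle_calls():

#include <stdbool.h>

static unsigned long stub_idle_calls;	/* stand-in for ts->idle_calls */
static unsigned long saved_idle_calls;	/* per-CPU snapshot, as in sugov_cpu */

/* Busy means the idle-calls counter did not move since the last check. */
static bool cpu_is_busy(void)
{
	unsigned long calls = stub_idle_calls;
	bool busy = (calls == saved_idle_calls);

	saved_idle_calls = calls;
	return busy;
}

/* Mirror of the guard in sugov_update_single(): a busy CPU keeps its
 * current frequency rather than taking a likely-premature reduction. */
static unsigned int next_freq(unsigned int cur, unsigned int wanted)
{
	if (cpu_is_busy() && wanted < cur)
		return cur;
	return wanted;
}

As the comment added to struct sugov_cpu notes, the saved counter is only meaningful for single-CPU policies, which is why the heuristic is applied in sugov_update_single() and not in the shared-policy path.
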
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
- unsigned long util, unsigned long max,
- unsigned int flags)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- unsigned int max_f = policy->cpuinfo.max_freq;
u64 last_freq_update_time = sg_policy->last_freq_update_time;
+ unsigned long util = 0, max = 1;
unsigned int j;
- if (flags & SCHED_CPUFREQ_RT_DL)
- return max_f;
-
- sugov_iowait_boost(sg_cpu, &util, &max);
-
for_each_cpu(j, policy->cpus) {
- struct sugov_cpu *j_sg_cpu;
+ struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
s64 delta_ns;
- if (j == smp_processor_id())
- continue;
-
- j_sg_cpu = &per_cpu(sugov_cpu, j);
/*
* If the CPU utilization was last updated before the previous
* frequency update and the time elapsed between the last update
@@ -254,7 +271,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
continue;
}
if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
- return max_f;
+ return policy->cpuinfo.max_freq;
j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
@@ -289,7 +306,11 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
sg_cpu->last_update = time;
if (sugov_should_update_freq(sg_policy, time)) {
- next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
+ if (flags & SCHED_CPUFREQ_RT_DL)
+ next_f = sg_policy->policy->cpuinfo.max_freq;
+ else
+ next_f = sugov_next_freq_shared(sg_cpu);
+
sugov_update_commit(sg_policy, time, next_f);
}
@@ -473,7 +494,6 @@ static int sugov_init(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
struct sugov_tunables *tunables;
- unsigned int lat;
int ret = 0;
/* State should be equivalent to EXIT */
@@ -512,10 +532,16 @@ static int sugov_init(struct cpufreq_policy *policy)
goto stop_kthread;
}
- tunables->rate_limit_us = LATENCY_MULTIPLIER;
- lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (lat)
- tunables->rate_limit_us *= lat;
+ if (policy->transition_delay_us) {
+ tunables->rate_limit_us = policy->transition_delay_us;
+ } else {
+ unsigned int lat;
+
+ tunables->rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat)
+ tunables->rate_limit_us *= lat;
+ }
policy->governor_data = sg_policy;
sg_policy->tunables = tunables;
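
With "cpufreq: schedutil: Use policy-dependent transition delays", a driver-supplied policy->transition_delay_us now takes precedence when initializing the rate_limit_us tunable, and the old LATENCY_MULTIPLIER scaling of the hardware transition latency becomes the fallback. A sketch of that arithmetic, assuming LATENCY_MULTIPLIER is 1000 and NSEC_PER_USEC is 1000 as in the kernel headers of this period:

#define LATENCY_MULTIPLIER	1000
#define NSEC_PER_USEC		1000UL

/* Sketch of the rate_limit_us initialization in sugov_init() above. */
static unsigned int compute_rate_limit_us(unsigned int transition_delay_us,
					  unsigned long transition_latency_ns)
{
	unsigned int lat;

	if (transition_delay_us)	/* driver-provided delay wins */
		return transition_delay_us;

	lat = transition_latency_ns / NSEC_PER_USEC;
	/* e.g. a 20000 ns latency gives lat = 20, i.e. a 20000 us limit */
	return lat ? lat * LATENCY_MULTIPLIER : LATENCY_MULTIPLIER;
}
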
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7fe53be86077..64c97fc130c4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -993,6 +993,18 @@ ktime_t tick_nohz_get_sleep_length(void)
return ts->sleep_length;
}
+/**
+ * tick_nohz_get_idle_calls - return the current idle calls counter value
+ *
+ * Called from the schedutil frequency scaling governor in scheduler context.
+ */
+unsigned long tick_nohz_get_idle_calls(void)
+{
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+ return ts->idle_calls;
+}
+
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE