Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 52
 1 file changed, 17 insertions(+), 35 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a41ea4729b8..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -22,7 +22,6 @@
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
-#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
@@ -31,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -77,7 +53,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
- index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+ policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
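
Note on the hunk above: cpufreq_frequency_table_target() now takes the
frequency bounds as explicit arguments instead of reading policy->min and
policy->max inside the helper, so the caller passes the limits it saw when
computing freq_next. A minimal sketch of the updated call pattern, assuming
the post-change signature (policy, target, min, max, relation); the variable
names here are illustrative, not from the patch:

	/* Find the table index closest to freq_next within the policy
	 * limits, rounding according to the requested relation. */
	int index = cpufreq_frequency_table_target(policy, freq_next,
						   policy->min, policy->max,
						   CPUFREQ_RELATION_L);
	unsigned int freq_req = policy->freq_table[index].frequency;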
@@ -202,7 +179,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
-static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -220,7 +197,7 @@ static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
return count;
}
-static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -237,7 +214,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -265,7 +242,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
@@ -290,7 +267,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
return count;
}
-static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
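
The store_*() -> *_store() renames above line the handlers up with the sysfs
attribute macros: __ATTR_RW(name) pastes name##_show and name##_store into
the attribute definition, so a read-write governor tunable only resolves if
its handlers carry the _show/_store suffixes. A short sketch, assuming the
gov_attr_rw() wrapper from cpufreq_governor.h:

	/* __ATTR_RW(io_is_busy) references io_is_busy_show and
	 * io_is_busy_store, hence the <name>_store naming above. */
	gov_attr_rw(io_is_busy);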
@@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
@@ -416,10 +393,13 @@ static struct dbs_governor od_dbs_gov = {
static void od_set_powersave_bias(unsigned int powersave_bias)
{
unsigned int cpu;
- cpumask_t done;
+ cpumask_var_t done;
+
+ if (!alloc_cpumask_var(&done, GFP_KERNEL))
+ return;
default_powersave_bias = powersave_bias;
- cpumask_clear(&done);
+ cpumask_clear(done);
cpus_read_lock();
for_each_online_cpu(cpu) {
@@ -428,7 +408,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
struct dbs_data *dbs_data;
struct od_dbs_tuners *od_tuners;
- if (cpumask_test_cpu(cpu, &done))
+ if (cpumask_test_cpu(cpu, done))
continue;
policy = cpufreq_cpu_get_raw(cpu);
@@ -439,13 +419,15 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
if (!policy_dbs)
continue;
- cpumask_or(&done, &done, policy->cpus);
+ cpumask_or(done, done, policy->cpus);
dbs_data = policy_dbs->dbs_data;
od_tuners = dbs_data->tuners;
od_tuners->powersave_bias = default_powersave_bias;
}
cpus_read_unlock();
+
+ free_cpumask_var(done);
}
void od_register_powersave_bias_handler(unsigned int (*f)
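
Note on the od_set_powersave_bias() hunks: the on-stack cpumask_t becomes a
cpumask_var_t. With CONFIG_CPUMASK_OFFSTACK=y the type is a pointer and
alloc_cpumask_var() heap-allocates the mask (and can fail, hence the early
return); otherwise it is a one-element array and the allocation always
succeeds. A minimal standalone sketch of the pattern:

	cpumask_var_t done;

	/* Can fail only in the CONFIG_CPUMASK_OFFSTACK=y case. */
	if (!alloc_cpumask_var(&done, GFP_KERNEL))
		return;

	cpumask_clear(done);		/* no '&' needed: cpumask_var_t    */
	cpumask_set_cpu(0, done);	/* already acts as struct cpumask * */

	free_cpumask_var(done);		/* pairs with alloc_cpumask_var()  */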