Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq.c              |  8 +++++---
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 13 +++++++++++++
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     | 13 +++++++++++++
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c        |  4 ++--
4 files changed, 33 insertions(+), 5 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 44d1eca83a72..35e0b9ceecf7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1497,6 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -1532,10 +1533,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cpufreq_cpu_notifier =
+static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
 {
     .notifier_call = cpufreq_cpu_callback,
 };
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /*********************************************************************
  *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
@@ -1596,7 +1598,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	}
 
 	if (!ret) {
-		register_cpu_notifier(&cpufreq_cpu_notifier);
+		register_hotcpu_notifier(&cpufreq_cpu_notifier);
 		dprintk("driver %s up and running\n", driver_data->name);
 		cpufreq_debug_enable_ratelimit();
 	}
@@ -1628,7 +1630,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	dprintk("unregistering driver %s\n", driver->name);
 
 	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
-	unregister_cpu_notifier(&cpufreq_cpu_notifier);
+	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e07a35487bde..b3ebc8f01975 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/cpu.h>
 #include <linux/sched.h>
 #include <linux/kmod.h>
 #include <linux/workqueue.h>
@@ -72,6 +73,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
@@ -414,12 +423,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -514,6 +525,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -524,6 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3e6ffcaa5af4..693e540481b4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/cpu.h>
 #include <linux/sched.h>
 #include <linux/kmod.h>
 #include <linux/workqueue.h>
@@ -71,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
@@ -363,12 +372,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	queue_delayed_work(dbs_workq, &dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -469,6 +480,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -479,6 +491,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c576c0b3f452..145061b8472a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,7 +350,7 @@ __init cpufreq_stats_init(void)
 		return ret;
 	}
 
-	register_cpu_notifier(&cpufreq_stat_cpu_notifier);
+	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
 	lock_cpu_hotplug();
 	for_each_online_cpu(cpu) {
 		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
@@ -368,7 +368,7 @@ __exit cpufreq_stats_exit(void)
 			CPUFREQ_POLICY_NOTIFIER);
 	cpufreq_unregister_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);
-	unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);
+	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
 	lock_cpu_hotplug();
 	for_each_online_cpu(cpu) {
 		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
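
Note on the DEADLOCK ALERT comment added by this patch: it boils down to a fixed lock-ordering rule. Any path that may end up holding both locks must take the cpu_hotplug lock before dbs_mutex, which is safe even when __cpufreq_driver_target() re-takes the cpu_hotplug lock, because that lock is recursive for the same process. Below is a minimal userspace sketch of that discipline, not kernel code: pthread mutexes stand in for the two locks, and the names (hotplug_lock, dbs_lock, dbs_path, driver_target) are illustrative only, not part of the patch.

/* lock-order-sketch.c -- illustrative analogue only; build with: cc -pthread lock-order-sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock;   /* stand-in for the cpu_hotplug lock (recursive) */
static pthread_mutex_t dbs_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for dbs_mutex */

/* Stands in for __cpufreq_driver_target(): may take the "hotplug" lock itself. */
static void driver_target(void)
{
	pthread_mutex_lock(&hotplug_lock); /* safe: recursive and already held by this thread */
	puts("would change the CPU frequency here");
	pthread_mutex_unlock(&hotplug_lock);
}

/* Stands in for do_dbs_timer()/CPUFREQ_GOV_LIMITS: outer lock first, then dbs_lock. */
static void dbs_path(void)
{
	pthread_mutex_lock(&hotplug_lock);
	pthread_mutex_lock(&dbs_lock);
	driver_target();
	pthread_mutex_unlock(&dbs_lock);
	pthread_mutex_unlock(&hotplug_lock);
}

int main(void)
{
	pthread_mutexattr_t attr;

	/* Mirror "cpu_hotplug lock is recursive for the same process". */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&hotplug_lock, &attr);

	dbs_path();
	return 0;
}

Taking the locks in the opposite order on any path, that is holding dbs_mutex while waiting for the cpu_hotplug lock, is exactly the inversion the governors avoid here by calling lock_cpu_hotplug() ahead of mutex_lock(&dbs_mutex).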