Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c                 |  8
-rw-r--r--  arch/arm/kvm/arm.c                              |  7
-rw-r--r--  arch/arm64/kernel/debug-monitors.c              |  6
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c               |  7
-rw-r--r--  arch/ia64/kernel/err_inject.c                   | 15
-rw-r--r--  arch/ia64/kernel/palinfo.c                      |  6
-rw-r--r--  arch/ia64/kernel/salinfo.c                      |  6
-rw-r--r--  arch/ia64/kernel/topology.c                     |  6
-rw-r--r--  arch/powerpc/kernel/sysfs.c                     |  8
-rw-r--r--  arch/s390/kernel/cache.c                        |  5
-rw-r--r--  arch/s390/kernel/smp.c                          | 13
-rw-r--r--  arch/sparc/kernel/sysfs.c                       |  6
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c           | 13
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c                |  8
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c        | 18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c        |  6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_uncore.c     |  7
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c     |  9
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c   |  6
-rw-r--r--  arch/x86/kernel/cpuid.c                         | 15
-rw-r--r--  arch/x86/kernel/hpet.c                          |  4
-rw-r--r--  arch/x86/kernel/msr.c                           | 16
-rw-r--r--  arch/x86/kernel/vsyscall_64.c                   |  6
-rw-r--r--  arch/x86/kvm/x86.c                              |  7
-rw-r--r--  arch/x86/oprofile/nmi_int.c                     | 15
-rw-r--r--  arch/x86/pci/amd_bus.c                          |  5
26 files changed, 158 insertions(+), 70 deletions(-)
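
The hunks below all apply the same conversion: take cpu_notifier_register_begin(), perform the per-CPU setup for CPUs that are already online, register the notifier with the __-prefixed variant that expects that section to be held, and release it with cpu_notifier_register_done(), replacing the earlier get/put_online_cpus() plus register_cpu_notifier()/register_hotcpu_notifier() sequences (or unprotected ones). As a reading aid only, here is a minimal sketch of the target pattern, including the error path where cpu_notifier_register_done() must still run. It is not part of the patch; the driver name, callback, and setup function are invented, while the cpu_notifier_register_begin()/__register_cpu_notifier()/cpu_notifier_register_done() calls are the APIs used by the hunks themselves.

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical per-CPU setup; stands in for cache_add_dev(), mce_device_create(), etc. */
static int example_setup_cpu(unsigned int cpu)
{
	return 0;
}

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	/* Handle CPU_ONLINE / CPU_DEAD etc. for CPUs hotplugged later. */
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	int cpu, err = 0;

	/*
	 * Hold off CPU hotplug so that initializing the currently online
	 * CPUs and registering the notifier happen atomically with respect
	 * to CPUs coming or going.
	 */
	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		err = example_setup_cpu(cpu);
		if (err)
			goto out;
	}

	/* __register_cpu_notifier() assumes the section above is held. */
	__register_cpu_notifier(&example_cpu_notifier);

out:
	cpu_notifier_register_done();
	return err;
}
device_initcall(example_init);
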
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 9da35c6d3411..4d963fb66e3f 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1073,6 +1073,8 @@ static int __init arch_hw_breakpoint_init(void)
core_num_brps = get_num_brps();
core_num_wrps = get_num_wrps();
+ cpu_notifier_register_begin();
+
/*
* We need to tread carefully here because DBGSWENABLE may be
* driven low on this core and there isn't an architected way to
@@ -1089,6 +1091,7 @@ static int __init arch_hw_breakpoint_init(void)
if (!cpumask_empty(&debug_err_mask)) {
core_num_brps = 0;
core_num_wrps = 0;
+ cpu_notifier_register_done();
return 0;
}
@@ -1108,7 +1111,10 @@ static int __init arch_hw_breakpoint_init(void)
TRAP_HWBKPT, "breakpoint debug exception");
/* Register hotplug and PM notifiers. */
- register_cpu_notifier(&dbg_reset_nb);
+ __register_cpu_notifier(&dbg_reset_nb);
+
+ cpu_notifier_register_done();
+
pm_init();
return 0;
}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index bd18bb8b2770..f0e50a0f3a65 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1051,21 +1051,26 @@ int kvm_arch_init(void *opaque)
}
}
+ cpu_notifier_register_begin();
+
err = init_hyp_mode();
if (err)
goto out_err;
- err = register_cpu_notifier(&hyp_init_cpu_nb);
+ err = __register_cpu_notifier(&hyp_init_cpu_nb);
if (err) {
kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
goto out_err;
}
+ cpu_notifier_register_done();
+
hyp_cpu_pm_init();
kvm_coproc_table_init();
return 0;
out_err:
+ cpu_notifier_register_done();
return err;
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 14ba23c61153..ed3955a95747 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -154,13 +154,17 @@ static struct notifier_block os_lock_nb = {
static int debug_monitors_init(void)
{
+ cpu_notifier_register_begin();
+
/* Clear the OS lock. */
on_each_cpu(clear_os_lock, NULL, 1);
isb();
local_dbg_enable();
/* Register hotplug handler. */
- register_cpu_notifier(&os_lock_nb);
+ __register_cpu_notifier(&os_lock_nb);
+
+ cpu_notifier_register_done();
return 0;
}
postcore_initcall(debug_monitors_init);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index f17f581116fc..bee789757806 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -913,6 +913,8 @@ static int __init arch_hw_breakpoint_init(void)
pr_info("found %d breakpoint and %d watchpoint registers.\n",
core_num_brps, core_num_wrps);
+ cpu_notifier_register_begin();
+
/*
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
@@ -927,7 +929,10 @@ static int __init arch_hw_breakpoint_init(void)
TRAP_HWBKPT, "hw-watchpoint handler");
/* Register hotplug notifier. */
- register_cpu_notifier(&hw_breakpoint_reset_nb);
+ __register_cpu_notifier(&hw_breakpoint_reset_nb);
+
+ cpu_notifier_register_done();
+
/* Register cpu_suspend hw breakpoint restore hook */
cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index f59c0b844e88..0c161ed6d18e 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -269,12 +269,17 @@ err_inject_init(void)
#ifdef ERR_INJ_DEBUG
printk(KERN_INFO "Enter error injection driver.\n");
#endif
+
+ cpu_notifier_register_begin();
+
for_each_online_cpu(i) {
err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
(void *)(long)i);
}
- register_hotcpu_notifier(&err_inject_cpu_notifier);
+ __register_hotcpu_notifier(&err_inject_cpu_notifier);
+
+ cpu_notifier_register_done();
return 0;
}
@@ -288,11 +293,17 @@ err_inject_exit(void)
#ifdef ERR_INJ_DEBUG
printk(KERN_INFO "Exit error injection driver.\n");
#endif
+
+ cpu_notifier_register_begin();
+
for_each_online_cpu(i) {
sys_dev = get_cpu_device(i);
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
}
- unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+ __unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+ cpu_notifier_register_done();
}
module_init(err_inject_init);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index ab333284f4b2..c39c3cd3ac34 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -996,13 +996,17 @@ palinfo_init(void)
if (!palinfo_dir)
return -ENOMEM;
+ cpu_notifier_register_begin();
+
/* Create palinfo dirs in /proc for all online cpus */
for_each_online_cpu(i) {
create_palinfo_proc_entries(i);
}
/* Register for future delivery via notify registration */
- register_hotcpu_notifier(&palinfo_cpu_notifier);
+ __register_hotcpu_notifier(&palinfo_cpu_notifier);
+
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 960a396f5929..ee9719eebb1e 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -635,6 +635,8 @@ salinfo_init(void)
(void *)salinfo_entries[i].feature);
}
+ cpu_notifier_register_begin();
+
for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
data = salinfo_data + i;
data->type = i;
@@ -669,7 +671,9 @@ salinfo_init(void)
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
- register_hotcpu_notifier(&salinfo_cpu_notifier);
+ __register_hotcpu_notifier(&salinfo_cpu_notifier);
+
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index ca69a5a96dcc..f295f9abba4b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -454,12 +454,16 @@ static int __init cache_sysfs_init(void)
{
int i;
+ cpu_notifier_register_begin();
+
for_each_online_cpu(i) {
struct device *sys_dev = get_cpu_device((unsigned int)i);
cache_add_dev(sys_dev);
}
- register_hotcpu_notifier(&cache_cpu_notifier);
+ __register_hotcpu_notifier(&cache_cpu_notifier);
+
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 97e1dc917683..d90d4b7810d6 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -975,7 +975,8 @@ static int __init topology_init(void)
int cpu;
register_nodes();
- register_cpu_notifier(&sysfs_cpu_nb);
+
+ cpu_notifier_register_begin();
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -999,6 +1000,11 @@ static int __init topology_init(void)
if (cpu_online(cpu))
register_cpu_online(cpu);
}
+
+ __register_cpu_notifier(&sysfs_cpu_nb);
+
+ cpu_notifier_register_done();
+
#ifdef CONFIG_PPC64
sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 3a414c0f93ed..c0b03c28d157 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -378,9 +378,12 @@ static int __init cache_init(void)
if (!test_facility(34))
return 0;
cache_build_info();
+
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu)
cache_add_cpu(cpu);
- hotcpu_notifier(cache_hotplug, 0);
+ __hotcpu_notifier(cache_hotplug, 0);
+ cpu_notifier_register_done();
return 0;
}
device_initcall(cache_init);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8827883310dd..5a640b395bd4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1057,19 +1057,24 @@ static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
static int __init s390_smp_init(void)
{
- int cpu, rc;
+ int cpu, rc = 0;
- hotcpu_notifier(smp_cpu_notify, 0);
#ifdef CONFIG_HOTPLUG_CPU
rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
if (rc)
return rc;
#endif
+ cpu_notifier_register_begin();
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)
- return rc;
+ goto out;
}
- return 0;
+
+ __hotcpu_notifier(smp_cpu_notify, 0);
+
+out:
+ cpu_notifier_register_done();
+ return rc;
}
subsys_initcall(s390_smp_init);
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index c21c673e5f7c..a364000ca1aa 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -300,7 +300,7 @@ static int __init topology_init(void)
check_mmu_stats();
- register_cpu_notifier(&sysfs_cpu_nb);
+ cpu_notifier_register_begin();
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -310,6 +310,10 @@ static int __init topology_init(void)
register_cpu_online(cpu);
}
+ __register_cpu_notifier(&sysfs_cpu_nb);
+
+ cpu_notifier_register_done();
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0641113e2965..a952e9c85b6f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1225,21 +1225,24 @@ static struct notifier_block cacheinfo_cpu_notifier = {
static int __init cache_sysfs_init(void)
{
- int i;
+ int i, err = 0;
if (num_cache_leaves == 0)
return 0;
+ cpu_notifier_register_begin();
for_each_online_cpu(i) {
- int err;
struct device *dev = get_cpu_device(i);
err = cache_add_dev(dev);
if (err)
- return err;
+ goto out;
}
- register_hotcpu_notifier(&cacheinfo_cpu_notifier);
- return 0;
+ __register_hotcpu_notifier(&cacheinfo_cpu_notifier);
+
+out:
+ cpu_notifier_register_done();
+ return err;
}
device_initcall(cache_sysfs_init);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 4d5419b249da..9b7734b1f975 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2434,14 +2434,18 @@ static __init int mcheck_init_device(void)
if (err)
return err;
+ cpu_notifier_register_begin();
for_each_online_cpu(i) {
err = mce_device_create(i);
- if (err)
+ if (err) {
+ cpu_notifier_register_done();
return err;
+ }
}
register_syscore_ops(&mce_syscore_ops);
- register_hotcpu_notifier(&mce_cpu_notifier);
+ __register_hotcpu_notifier(&mce_cpu_notifier);
+ cpu_notifier_register_done();
/* register character device /dev/mcelog */
misc_register(&mce_chrdev_device);
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 3eec7de76efb..d921b7ee6595 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -271,9 +271,6 @@ static void thermal_throttle_remove_dev(struct device *dev)
sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}
-/* Mutex protecting device creation against CPU hotplug: */
-static DEFINE_MUTEX(therm_cpu_lock);
-
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
@@ -289,18 +286,14 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- mutex_lock(&therm_cpu_lock);
err = thermal_throttle_add_dev(dev, cpu);
- mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- mutex_lock(&therm_cpu_lock);
thermal_throttle_remove_dev(dev);
- mutex_unlock(&therm_cpu_lock);
break;
}
return notifier_from_errno(err);
@@ -319,19 +312,16 @@ static __init int thermal_throttle_init_device(void)
if (!atomic_read(&therm_throt_en))
return 0;
- register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
+ cpu_notifier_register_begin();
-#ifdef CONFIG_HOTPLUG_CPU
- mutex_lock(&therm_cpu_lock);
-#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
WARN_ON(err);
}
-#ifdef CONFIG_HOTPLUG_CPU
- mutex_unlock(&therm_cpu_lock);
-#endif
+
+ __register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 4b8e4d3cd6ea..4c36bbe3173a 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -926,13 +926,13 @@ static __init int amd_ibs_init(void)
goto out;
perf_ibs_pm_init();
- get_online_cpus();
+ cpu_notifier_register_begin();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
smp_mb();
- perf_cpu_notifier(perf_ibs_cpu_notifier);
smp_call_function(setup_APIC_ibs, NULL, 1);
- put_online_cpus();
+ __perf_cpu_notifier(perf_ibs_cpu_notifier);
+ cpu_notifier_register_done();
ret = perf_event_ibs_init();
out:
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 754291adec33..3bbdf4cd38b9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -531,15 +531,16 @@ static int __init amd_uncore_init(void)
if (ret)
return -ENODEV;
- get_online_cpus();
+ cpu_notifier_register_begin();
+
/* init cpus already online before registering for hotplug notifier */
for_each_online_cpu(cpu) {
amd_uncore_cpu_up_prepare(cpu);
smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
}
- register_cpu_notifier(&amd_uncore_cpu_notifier_block);
- put_online_cpus();
+ __register_cpu_notifier(&amd_uncore_cpu_notifier_block);
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 5ad35ad94d0f..059218ed5208 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -646,19 +646,20 @@ static int __init rapl_pmu_init(void)
/* unsupported */
return 0;
}
- get_online_cpus();
+
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu) {
rapl_cpu_prepare(cpu);
rapl_cpu_init(cpu);
}
- perf_cpu_notifier(rapl_cpu_notifier);
+ __perf_cpu_notifier(rapl_cpu_notifier);
ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
if (WARN_ON(ret)) {
pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
- put_online_cpus();
+ cpu_notifier_register_done();
return -1;
}
@@ -672,7 +673,7 @@ static int __init rapl_pmu_init(void)
hweight32(rapl_cntr_mask),
ktime_to_ms(pmu->timer_interval));
- put_online_cpus();
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index bd2253d40cff..65bbbea38b9c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -4244,7 +4244,7 @@ static void __init uncore_cpumask_init(void)
if (!cpumask_empty(&uncore_cpu_mask))
return;
- get_online_cpus();
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu) {
int i, phys_id = topology_physical_package_id(cpu);
@@ -4263,9 +4263,9 @@ static void __init uncore_cpumask_init(void)
}
on_each_cpu(uncore_cpu_setup, NULL, 1);
- register_cpu_notifier(&uncore_cpu_nb);
+ __register_cpu_notifier(&uncore_cpu_nb);
- put_online_cpus();
+ cpu_notifier_register_done();
}
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 7d9481c743f8..3225ae6c5180 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -198,14 +198,15 @@ static int __init cpuid_init(void)
goto out_chrdev;
}
cpuid_class->devnode = cpuid_devnode;
- get_online_cpus();
+
+ cpu_notifier_register_begin();
for_each_online_cpu(i) {
err = cpuid_device_create(i);
if (err != 0)
goto out_class;
}
- register_hotcpu_notifier(&cpuid_class_cpu_notifier);
- put_online_cpus();
+ __register_hotcpu_notifier(&cpuid_class_cpu_notifier);
+ cpu_notifier_register_done();
err = 0;
goto out;
@@ -215,7 +216,7 @@ out_class:
for_each_online_cpu(i) {
cpuid_device_destroy(i);
}
- put_online_cpus();
+ cpu_notifier_register_done();
class_destroy(cpuid_class);
out_chrdev:
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
@@ -227,13 +228,13 @@ static void __exit cpuid_exit(void)
{
int cpu = 0;
- get_online_cpus();
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu)
cpuid_device_destroy(cpu);
class_destroy(cpuid_class);
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
- unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
- put_online_cpus();
+ __unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
+ cpu_notifier_register_done();
}
module_init(cpuid_init);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 93eed15a8fd4..8d80ae011603 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -941,12 +941,14 @@ static __init int hpet_late_init(void)
if (boot_cpu_has(X86_FEATURE_ARAT))
return 0;
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu) {
hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
}
/* This notifier should be called after workqueue is ready */
- hotcpu_notifier(hpet_cpuhp_notify, -20);
+ __hotcpu_notifier(hpet_cpuhp_notify, -20);
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 05266b5aae22..c9603ac80de5 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -259,14 +259,15 @@ static int __init msr_init(void)
goto out_chrdev;
}
msr_class->devnode = msr_devnode;
- get_online_cpus();
+
+ cpu_notifier_register_begin();
for_each_online_cpu(i) {
err = msr_device_create(i);
if (err != 0)
goto out_class;
}
- register_hotcpu_notifier(&msr_class_cpu_notifier);
- put_online_cpus();
+ __register_hotcpu_notifier(&msr_class_cpu_notifier);
+ cpu_notifier_register_done();
err = 0;
goto out;
@@ -275,7 +276,7 @@ out_class:
i = 0;
for_each_online_cpu(i)
msr_device_destroy(i);
- put_online_cpus();
+ cpu_notifier_register_done();
class_destroy(msr_class);
out_chrdev:
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
@@ -286,13 +287,14 @@ out:
static void __exit msr_exit(void)
{
int cpu = 0;
- get_online_cpus();
+
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu)
msr_device_destroy(cpu);
class_destroy(msr_class);
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
- unregister_hotcpu_notifier(&msr_class_cpu_notifier);
- put_online_cpus();
+ __unregister_hotcpu_notifier(&msr_class_cpu_notifier);
+ cpu_notifier_register_done();
}
module_init(msr_init);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9ea287666c65..8b3b3eb3cead 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -348,9 +348,13 @@ static int __init vsyscall_init(void)
{
BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
+ cpu_notifier_register_begin();
+
on_each_cpu(cpu_vsyscall_init, NULL, 1);
/* notifier priority > KVM */
- hotcpu_notifier(cpu_vsyscall_notifier, 30);
+ __hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
+ cpu_notifier_register_done();
return 0;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d1c55f8722c6..9d1b5cd4d34c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5422,7 +5422,8 @@ static void kvm_timer_init(void)
int cpu;
max_tsc_khz = tsc_khz;
- register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+
+ cpu_notifier_register_begin();
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy policy;
@@ -5439,6 +5440,10 @@ static void kvm_timer_init(void)
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
for_each_online_cpu(cpu)
smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
+
+ __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+ cpu_notifier_register_done();
+
}
static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 6890d8498e0b..379e8bd0deea 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -494,14 +494,19 @@ static int nmi_setup(void)
if (err)
goto fail;
+ cpu_notifier_register_begin();
+
+ /* Use get/put_online_cpus() to protect 'nmi_enabled' */
get_online_cpus();
- register_cpu_notifier(&oprofile_cpu_nb);
nmi_enabled = 1;
/* make nmi_enabled visible to the nmi handler: */
smp_mb();
on_each_cpu(nmi_cpu_setup, NULL, 1);
+ __register_cpu_notifier(&oprofile_cpu_nb);
put_online_cpus();
+ cpu_notifier_register_done();
+
return 0;
fail:
free_msrs();
@@ -512,12 +517,18 @@ static void nmi_shutdown(void)
{
struct op_msrs *msrs;
+ cpu_notifier_register_begin();
+
+ /* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
get_online_cpus();
- unregister_cpu_notifier(&oprofile_cpu_nb);
on_each_cpu(nmi_cpu_shutdown, NULL, 1);
nmi_enabled = 0;
ctr_running = 0;
+ __unregister_cpu_notifier(&oprofile_cpu_nb);
put_online_cpus();
+
+ cpu_notifier_register_done();
+
/* make variables visible to the nmi handler: */
smp_mb();
unregister_nmi_handler(NMI_LOCAL, "oprofile");
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index a313a7fb6b86..e88f4c53d7f6 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -370,10 +370,13 @@ static int __init pci_io_ecs_init(void)
if (early_pci_allowed())
pci_enable_pci_io_ecs();
- register_cpu_notifier(&amd_cpu_notifier);
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu)
amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
(void *)(long)cpu);
+ __register_cpu_notifier(&amd_cpu_notifier);
+ cpu_notifier_register_done();
+
pci_probe |= PCI_HAS_IO_ECS;
return 0;