git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Apr 2014 21:55:46 +0000 (14:55 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Apr 2014 21:55:46 +0000 (14:55 -0700)
Pull CPU hotplug notifiers registration fixes from Rafael Wysocki:
 "The purpose of this single series of commits from Srivatsa S Bhat
  (with a small piece from Gautham R Shenoy) touching multiple
  subsystems that use CPU hotplug notifiers is to provide a way to
  register them that will not lead to deadlocks with CPU online/offline
  operations as described in the changelog of commit 93ae4f978ca7f ("CPU
  hotplug: Provide lockless versions of callback registration
  functions").

  The first three commits in the series introduce the API and document
  it and the rest simply goes through the users of CPU hotplug notifiers
  and converts them to using the new method"

* tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (52 commits)
  net/iucv/iucv.c: Fix CPU hotplug callback registration
  net/core/flow.c: Fix CPU hotplug callback registration
  mm, zswap: Fix CPU hotplug callback registration
  mm, vmstat: Fix CPU hotplug callback registration
  profile: Fix CPU hotplug callback registration
  trace, ring-buffer: Fix CPU hotplug callback registration
  xen, balloon: Fix CPU hotplug callback registration
  hwmon, via-cputemp: Fix CPU hotplug callback registration
  hwmon, coretemp: Fix CPU hotplug callback registration
  thermal, x86-pkg-temp: Fix CPU hotplug callback registration
  octeon, watchdog: Fix CPU hotplug callback registration
  oprofile, nmi-timer: Fix CPU hotplug callback registration
  intel-idle: Fix CPU hotplug callback registration
  clocksource, dummy-timer: Fix CPU hotplug callback registration
  drivers/base/topology.c: Fix CPU hotplug callback registration
  acpi-cpufreq: Fix CPU hotplug callback registration
  zsmalloc: Fix CPU hotplug callback registration
  scsi, fcoe: Fix CPU hotplug callback registration
  scsi, bnx2fc: Fix CPU hotplug callback registration
  scsi, bnx2i: Fix CPU hotplug callback registration
  ...

51 files changed:
Documentation/cpu-hotplug.txt
arch/arm/kernel/hw_breakpoint.c
arch/arm/kvm/arm.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/hw_breakpoint.c
arch/ia64/kernel/err_inject.c
arch/ia64/kernel/palinfo.c
arch/ia64/kernel/salinfo.c
arch/ia64/kernel/topology.c
arch/powerpc/kernel/sysfs.c
arch/s390/kernel/cache.c
arch/s390/kernel/smp.c
arch/sparc/kernel/sysfs.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/perf_event_amd_ibs.c
arch/x86/kernel/cpu/perf_event_amd_uncore.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpuid.c
arch/x86/kernel/hpet.c
arch/x86/kernel/msr.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kvm/x86.c
arch/x86/oprofile/nmi_int.c
arch/x86/pci/amd_bus.c
drivers/base/topology.c
drivers/clocksource/dummy_timer.c
drivers/cpufreq/acpi-cpufreq.c
drivers/hwmon/coretemp.c
drivers/hwmon/via-cputemp.c
drivers/idle/intel_idle.c
drivers/oprofile/nmi_timer_int.c
drivers/powercap/intel_rapl.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/fcoe/fcoe.c
drivers/thermal/x86_pkg_temp_thermal.c
drivers/watchdog/octeon-wdt-main.c
drivers/xen/balloon.c
include/linux/cpu.h
include/linux/perf_event.h
kernel/cpu.c
kernel/profile.c
kernel/trace/ring_buffer.c
mm/vmstat.c
mm/zsmalloc.c
mm/zswap.c
net/core/flow.c
net/iucv/iucv.c

index be675d2d15a73a4f784d7192931e187be294109d..a0b005d2bd95ce8d0251679adc90002a6e3b4809 100644 (file)
@@ -312,12 +312,57 @@ things will happen if a notifier in path sent a BAD notify code.
 Q: I don't see my action being called for all CPUs already up and running?
 A: Yes, CPU notifiers are called only when new CPUs are on-lined or offlined.
    If you need to perform some action for each cpu already in the system, then
+   do this:
 
        for_each_online_cpu(i) {
                foobar_cpu_callback(&foobar_cpu_notifier, CPU_UP_PREPARE, i);
                foobar_cpu_callback(&foobar_cpu_notifier, CPU_ONLINE, i);
        }
 
+   However, if you want to register a hotplug callback, as well as perform
+   some initialization for CPUs that are already online, then do this:
+
+   Version 1: (Correct)
+   ---------
+
+       cpu_notifier_register_begin();
+
+               for_each_online_cpu(i) {
+                       foobar_cpu_callback(&foobar_cpu_notifier,
+                                           CPU_UP_PREPARE, i);
+                       foobar_cpu_callback(&foobar_cpu_notifier,
+                                           CPU_ONLINE, i);
+               }
+
+       /* Note the use of the double underscored version of the API */
+       __register_cpu_notifier(&foobar_cpu_notifier);
+
+       cpu_notifier_register_done();
+
+   Note that the following code is *NOT* the right way to achieve this,
+   because it is prone to an ABBA deadlock between the cpu_add_remove_lock
+   and the cpu_hotplug.lock.
+
+   Version 2: (Wrong!)
+   ---------
+
+       get_online_cpus();
+
+               for_each_online_cpu(i) {
+                       foobar_cpu_callback(&foobar_cpu_notifier,
+                                           CPU_UP_PREPARE, i);
+                       foobar_cpu_callback(&foobar_cpu_notifier,
+                                           CPU_ONLINE, i);
+               }
+
+       register_cpu_notifier(&foobar_cpu_notifier);
+
+       put_online_cpus();
+
+    So always use the first version shown above when you want to register
+    callbacks as well as initialize the already online CPUs.
+
+
 Q: If i would like to develop cpu hotplug support for a new architecture,
    what do i need at a minimum?
 A: The following are what is required for CPU hotplug infrastructure to work
index 9da35c6d3411007f76a2eb9e31c5cf836397472a..4d963fb66e3f0bce4590e5885415916be9aba866 100644 (file)
@@ -1073,6 +1073,8 @@ static int __init arch_hw_breakpoint_init(void)
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();
 
+       cpu_notifier_register_begin();
+
        /*
         * We need to tread carefully here because DBGSWENABLE may be
         * driven low on this core and there isn't an architected way to
@@ -1089,6 +1091,7 @@ static int __init arch_hw_breakpoint_init(void)
        if (!cpumask_empty(&debug_err_mask)) {
                core_num_brps = 0;
                core_num_wrps = 0;
+               cpu_notifier_register_done();
                return 0;
        }
 
@@ -1108,7 +1111,10 @@ static int __init arch_hw_breakpoint_init(void)
                        TRAP_HWBKPT, "breakpoint debug exception");
 
        /* Register hotplug and PM notifiers. */
-       register_cpu_notifier(&dbg_reset_nb);
+       __register_cpu_notifier(&dbg_reset_nb);
+
+       cpu_notifier_register_done();
+
        pm_init();
        return 0;
 }
index bd18bb8b2770ced6a65e5b3ad50dd79963d49c3b..f0e50a0f3a65b1c0476ec18db3ff914594c1061f 100644 (file)
@@ -1051,21 +1051,26 @@ int kvm_arch_init(void *opaque)
                }
        }
 
+       cpu_notifier_register_begin();
+
        err = init_hyp_mode();
        if (err)
                goto out_err;
 
-       err = register_cpu_notifier(&hyp_init_cpu_nb);
+       err = __register_cpu_notifier(&hyp_init_cpu_nb);
        if (err) {
                kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
                goto out_err;
        }
 
+       cpu_notifier_register_done();
+
        hyp_cpu_pm_init();
 
        kvm_coproc_table_init();
        return 0;
 out_err:
+       cpu_notifier_register_done();
        return err;
 }
 
index 14ba23c6115367b962922e5decd3afabb59e789e..ed3955a95747286ebcb3f705c107dc1b3af90423 100644 (file)
@@ -154,13 +154,17 @@ static struct notifier_block os_lock_nb = {
 
 static int debug_monitors_init(void)
 {
+       cpu_notifier_register_begin();
+
        /* Clear the OS lock. */
        on_each_cpu(clear_os_lock, NULL, 1);
        isb();
        local_dbg_enable();
 
        /* Register hotplug handler. */
-       register_cpu_notifier(&os_lock_nb);
+       __register_cpu_notifier(&os_lock_nb);
+
+       cpu_notifier_register_done();
        return 0;
 }
 postcore_initcall(debug_monitors_init);
index f17f581116fc15762d7092f1fada45e06e3dd7d6..bee789757806a268864d6dc1fe8ac8a0ebb97fd7 100644 (file)
@@ -913,6 +913,8 @@ static int __init arch_hw_breakpoint_init(void)
        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);
 
+       cpu_notifier_register_begin();
+
        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
@@ -927,7 +929,10 @@ static int __init arch_hw_breakpoint_init(void)
                              TRAP_HWBKPT, "hw-watchpoint handler");
 
        /* Register hotplug notifier. */
-       register_cpu_notifier(&hw_breakpoint_reset_nb);
+       __register_cpu_notifier(&hw_breakpoint_reset_nb);
+
+       cpu_notifier_register_done();
+
        /* Register cpu_suspend hw breakpoint restore hook */
        cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
 
index f59c0b844e8855ea55989ad6b814fad125b45879..0c161ed6d18e6d77433c8bafabfdd48f904a077d 100644 (file)
@@ -269,12 +269,17 @@ err_inject_init(void)
 #ifdef ERR_INJ_DEBUG
        printk(KERN_INFO "Enter error injection driver.\n");
 #endif
+
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(i) {
                err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
                                (void *)(long)i);
        }
 
-       register_hotcpu_notifier(&err_inject_cpu_notifier);
+       __register_hotcpu_notifier(&err_inject_cpu_notifier);
+
+       cpu_notifier_register_done();
 
        return 0;
 }
@@ -288,11 +293,17 @@ err_inject_exit(void)
 #ifdef ERR_INJ_DEBUG
        printk(KERN_INFO "Exit error injection driver.\n");
 #endif
+
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(i) {
                sys_dev = get_cpu_device(i);
                sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
        }
-       unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+       __unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+       cpu_notifier_register_done();
 }
 
 module_init(err_inject_init);
index ab333284f4b2eb45ce2294b337d8e3196a081721..c39c3cd3ac348a414787d4db30ec6c6181f7f6d9 100644 (file)
@@ -996,13 +996,17 @@ palinfo_init(void)
        if (!palinfo_dir)
                return -ENOMEM;
 
+       cpu_notifier_register_begin();
+
        /* Create palinfo dirs in /proc for all online cpus */
        for_each_online_cpu(i) {
                create_palinfo_proc_entries(i);
        }
 
        /* Register for future delivery via notify registration */
-       register_hotcpu_notifier(&palinfo_cpu_notifier);
+       __register_hotcpu_notifier(&palinfo_cpu_notifier);
+
+       cpu_notifier_register_done();
 
        return 0;
 }
index 960a396f5929667162bf0bb867ac03d7aa85e302..ee9719eebb1e217989f04de5ae0b72e870c876fa 100644 (file)
@@ -635,6 +635,8 @@ salinfo_init(void)
                                           (void *)salinfo_entries[i].feature);
        }
 
+       cpu_notifier_register_begin();
+
        for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
                data = salinfo_data + i;
                data->type = i;
@@ -669,7 +671,9 @@ salinfo_init(void)
        salinfo_timer.function = &salinfo_timeout;
        add_timer(&salinfo_timer);
 
-       register_hotcpu_notifier(&salinfo_cpu_notifier);
+       __register_hotcpu_notifier(&salinfo_cpu_notifier);
+
+       cpu_notifier_register_done();
 
        return 0;
 }
index ca69a5a96dcc07bb760eabd43ff6a03dfc2d6db2..f295f9abba4b04772679a6ff232a2f1387572c27 100644 (file)
@@ -454,12 +454,16 @@ static int __init cache_sysfs_init(void)
 {
        int i;
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(i) {
                struct device *sys_dev = get_cpu_device((unsigned int)i);
                cache_add_dev(sys_dev);
        }
 
-       register_hotcpu_notifier(&cache_cpu_notifier);
+       __register_hotcpu_notifier(&cache_cpu_notifier);
+
+       cpu_notifier_register_done();
 
        return 0;
 }
index 97e1dc91768374e7a7c5b56eb6752b7df5ae05ea..d90d4b7810d69edfa2283367aa7146edbcac8fa0 100644 (file)
@@ -975,7 +975,8 @@ static int __init topology_init(void)
        int cpu;
 
        register_nodes();
-       register_cpu_notifier(&sysfs_cpu_nb);
+
+       cpu_notifier_register_begin();
 
        for_each_possible_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -999,6 +1000,11 @@ static int __init topology_init(void)
                if (cpu_online(cpu))
                        register_cpu_online(cpu);
        }
+
+       __register_cpu_notifier(&sysfs_cpu_nb);
+
+       cpu_notifier_register_done();
+
 #ifdef CONFIG_PPC64
        sysfs_create_dscr_default();
 #endif /* CONFIG_PPC64 */
index 3a414c0f93edcd08d69d3a1aa2af645c098327c9..c0b03c28d15717448f4f713c083bbd891861f42b 100644 (file)
@@ -378,9 +378,12 @@ static int __init cache_init(void)
        if (!test_facility(34))
                return 0;
        cache_build_info();
+
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                cache_add_cpu(cpu);
-       hotcpu_notifier(cache_hotplug, 0);
+       __hotcpu_notifier(cache_hotplug, 0);
+       cpu_notifier_register_done();
        return 0;
 }
 device_initcall(cache_init);
index 8827883310ddbc05c765f17b2125e804dc2cd85e..5a640b395bd4d37ced17d847fd01e6869b487363 100644 (file)
@@ -1057,19 +1057,24 @@ static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
 
 static int __init s390_smp_init(void)
 {
-       int cpu, rc;
+       int cpu, rc = 0;
 
-       hotcpu_notifier(smp_cpu_notify, 0);
 #ifdef CONFIG_HOTPLUG_CPU
        rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
        if (rc)
                return rc;
 #endif
+       cpu_notifier_register_begin();
        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
-                       return rc;
+                       goto out;
        }
-       return 0;
+
+       __hotcpu_notifier(smp_cpu_notify, 0);
+
+out:
+       cpu_notifier_register_done();
+       return rc;
 }
 subsys_initcall(s390_smp_init);
index c21c673e5f7cde71cdeca0c4b48bf565df96bfd7..a364000ca1aa8a495f7e8b9bf59882350f41c6de 100644 (file)
@@ -300,7 +300,7 @@ static int __init topology_init(void)
 
        check_mmu_stats();
 
-       register_cpu_notifier(&sysfs_cpu_nb);
+       cpu_notifier_register_begin();
 
        for_each_possible_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -310,6 +310,10 @@ static int __init topology_init(void)
                        register_cpu_online(cpu);
        }
 
+       __register_cpu_notifier(&sysfs_cpu_nb);
+
+       cpu_notifier_register_done();
+
        return 0;
 }
 
index 0641113e296598c058cab739d94c0ae2f5280ec0..a952e9c85b6fad81684c4bd39418bd5623a634ff 100644 (file)
@@ -1225,21 +1225,24 @@ static struct notifier_block cacheinfo_cpu_notifier = {
 
 static int __init cache_sysfs_init(void)
 {
-       int i;
+       int i, err = 0;
 
        if (num_cache_leaves == 0)
                return 0;
 
+       cpu_notifier_register_begin();
        for_each_online_cpu(i) {
-               int err;
                struct device *dev = get_cpu_device(i);
 
                err = cache_add_dev(dev);
                if (err)
-                       return err;
+                       goto out;
        }
-       register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-       return 0;
+       __register_hotcpu_notifier(&cacheinfo_cpu_notifier);
+
+out:
+       cpu_notifier_register_done();
+       return err;
 }
 
 device_initcall(cache_sysfs_init);
index 4d5419b249da5353afb24edd23c8410d60f8079d..9b7734b1f975a4c0cfc9776749aecc83ace1cd10 100644 (file)
@@ -2434,14 +2434,18 @@ static __init int mcheck_init_device(void)
        if (err)
                return err;
 
+       cpu_notifier_register_begin();
        for_each_online_cpu(i) {
                err = mce_device_create(i);
-               if (err)
+               if (err) {
+                       cpu_notifier_register_done();
                        return err;
+               }
        }
 
        register_syscore_ops(&mce_syscore_ops);
-       register_hotcpu_notifier(&mce_cpu_notifier);
+       __register_hotcpu_notifier(&mce_cpu_notifier);
+       cpu_notifier_register_done();
 
        /* register character device /dev/mcelog */
        misc_register(&mce_chrdev_device);
index 3eec7de76efbb4650ea760f5163d495f47fd1f1d..d921b7ee659525e7d040ff5ea5c6a569239a64a7 100644 (file)
@@ -271,9 +271,6 @@ static void thermal_throttle_remove_dev(struct device *dev)
        sysfs_remove_group(&dev->kobj, &thermal_attr_group);
 }
 
-/* Mutex protecting device creation against CPU hotplug: */
-static DEFINE_MUTEX(therm_cpu_lock);
-
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
 static int
 thermal_throttle_cpu_callback(struct notifier_block *nfb,
@@ -289,18 +286,14 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               mutex_lock(&therm_cpu_lock);
                err = thermal_throttle_add_dev(dev, cpu);
-               mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               mutex_lock(&therm_cpu_lock);
                thermal_throttle_remove_dev(dev);
-               mutex_unlock(&therm_cpu_lock);
                break;
        }
        return notifier_from_errno(err);
@@ -319,19 +312,16 @@ static __init int thermal_throttle_init_device(void)
        if (!atomic_read(&therm_throt_en))
                return 0;
 
-       register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
+       cpu_notifier_register_begin();
 
-#ifdef CONFIG_HOTPLUG_CPU
-       mutex_lock(&therm_cpu_lock);
-#endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
                err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
                WARN_ON(err);
        }
-#ifdef CONFIG_HOTPLUG_CPU
-       mutex_unlock(&therm_cpu_lock);
-#endif
+
+       __register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
+       cpu_notifier_register_done();
 
        return 0;
 }
index 4b8e4d3cd6ea62bc9c9047e869fa84cbd9cbe33a..4c36bbe3173aa0f683a5a982cc857c6f3ffa3e0a 100644 (file)
@@ -926,13 +926,13 @@ static __init int amd_ibs_init(void)
                goto out;
 
        perf_ibs_pm_init();
-       get_online_cpus();
+       cpu_notifier_register_begin();
        ibs_caps = caps;
        /* make ibs_caps visible to other cpus: */
        smp_mb();
-       perf_cpu_notifier(perf_ibs_cpu_notifier);
        smp_call_function(setup_APIC_ibs, NULL, 1);
-       put_online_cpus();
+       __perf_cpu_notifier(perf_ibs_cpu_notifier);
+       cpu_notifier_register_done();
 
        ret = perf_event_ibs_init();
 out:
index 754291adec338b34317d5c07a359741e5e298d80..3bbdf4cd38b9c4e38dcf8e4f480753e93858631a 100644 (file)
@@ -531,15 +531,16 @@ static int __init amd_uncore_init(void)
        if (ret)
                return -ENODEV;
 
-       get_online_cpus();
+       cpu_notifier_register_begin();
+
        /* init cpus already online before registering for hotplug notifier */
        for_each_online_cpu(cpu) {
                amd_uncore_cpu_up_prepare(cpu);
                smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
        }
 
-       register_cpu_notifier(&amd_uncore_cpu_notifier_block);
-       put_online_cpus();
+       __register_cpu_notifier(&amd_uncore_cpu_notifier_block);
+       cpu_notifier_register_done();
 
        return 0;
 }
index 5ad35ad94d0f819b1cf54f627d5df4fcfc58e460..059218ed5208e83ab40d44374064a53fdeab5568 100644 (file)
@@ -646,19 +646,20 @@ static int __init rapl_pmu_init(void)
                /* unsupported */
                return 0;
        }
-       get_online_cpus();
+
+       cpu_notifier_register_begin();
 
        for_each_online_cpu(cpu) {
                rapl_cpu_prepare(cpu);
                rapl_cpu_init(cpu);
        }
 
-       perf_cpu_notifier(rapl_cpu_notifier);
+       __perf_cpu_notifier(rapl_cpu_notifier);
 
        ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
        if (WARN_ON(ret)) {
                pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
-               put_online_cpus();
+               cpu_notifier_register_done();
                return -1;
        }
 
@@ -672,7 +673,7 @@ static int __init rapl_pmu_init(void)
                hweight32(rapl_cntr_mask),
                ktime_to_ms(pmu->timer_interval));
 
-       put_online_cpus();
+       cpu_notifier_register_done();
 
        return 0;
 }
index bd2253d40cffe16363569384084bdf6d6d73e7f5..65bbbea38b9c9c0f7246a3c4fee4176dd529647b 100644 (file)
@@ -4244,7 +4244,7 @@ static void __init uncore_cpumask_init(void)
        if (!cpumask_empty(&uncore_cpu_mask))
                return;
 
-       get_online_cpus();
+       cpu_notifier_register_begin();
 
        for_each_online_cpu(cpu) {
                int i, phys_id = topology_physical_package_id(cpu);
@@ -4263,9 +4263,9 @@ static void __init uncore_cpumask_init(void)
        }
        on_each_cpu(uncore_cpu_setup, NULL, 1);
 
-       register_cpu_notifier(&uncore_cpu_nb);
+       __register_cpu_notifier(&uncore_cpu_nb);
 
-       put_online_cpus();
+       cpu_notifier_register_done();
 }
 
 
index 7d9481c743f8439ae3a36256f008c45380ee49e8..3225ae6c51806af1590ce780a68ff67e6ca8ae77 100644 (file)
@@ -198,14 +198,15 @@ static int __init cpuid_init(void)
                goto out_chrdev;
        }
        cpuid_class->devnode = cpuid_devnode;
-       get_online_cpus();
+
+       cpu_notifier_register_begin();
        for_each_online_cpu(i) {
                err = cpuid_device_create(i);
                if (err != 0)
                        goto out_class;
        }
-       register_hotcpu_notifier(&cpuid_class_cpu_notifier);
-       put_online_cpus();
+       __register_hotcpu_notifier(&cpuid_class_cpu_notifier);
+       cpu_notifier_register_done();
 
        err = 0;
        goto out;
@@ -215,7 +216,7 @@ out_class:
        for_each_online_cpu(i) {
                cpuid_device_destroy(i);
        }
-       put_online_cpus();
+       cpu_notifier_register_done();
        class_destroy(cpuid_class);
 out_chrdev:
        __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
@@ -227,13 +228,13 @@ static void __exit cpuid_exit(void)
 {
        int cpu = 0;
 
-       get_online_cpus();
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                cpuid_device_destroy(cpu);
        class_destroy(cpuid_class);
        __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
-       unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
-       put_online_cpus();
+       __unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
+       cpu_notifier_register_done();
 }
 
 module_init(cpuid_init);
index 93eed15a8fd41aeb4556117a932c9afa591893b3..8d80ae0116039b6c71945737befc4f88dea097df 100644 (file)
@@ -941,12 +941,14 @@ static __init int hpet_late_init(void)
        if (boot_cpu_has(X86_FEATURE_ARAT))
                return 0;
 
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu) {
                hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
        }
 
        /* This notifier should be called after workqueue is ready */
-       hotcpu_notifier(hpet_cpuhp_notify, -20);
+       __hotcpu_notifier(hpet_cpuhp_notify, -20);
+       cpu_notifier_register_done();
 
        return 0;
 }
index 05266b5aae22916e864bb21e1399193ff253374e..c9603ac80de5ecbc32ac80027d33a815ee6f9e94 100644 (file)
@@ -259,14 +259,15 @@ static int __init msr_init(void)
                goto out_chrdev;
        }
        msr_class->devnode = msr_devnode;
-       get_online_cpus();
+
+       cpu_notifier_register_begin();
        for_each_online_cpu(i) {
                err = msr_device_create(i);
                if (err != 0)
                        goto out_class;
        }
-       register_hotcpu_notifier(&msr_class_cpu_notifier);
-       put_online_cpus();
+       __register_hotcpu_notifier(&msr_class_cpu_notifier);
+       cpu_notifier_register_done();
 
        err = 0;
        goto out;
@@ -275,7 +276,7 @@ out_class:
        i = 0;
        for_each_online_cpu(i)
                msr_device_destroy(i);
-       put_online_cpus();
+       cpu_notifier_register_done();
        class_destroy(msr_class);
 out_chrdev:
        __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
@@ -286,13 +287,14 @@ out:
 static void __exit msr_exit(void)
 {
        int cpu = 0;
-       get_online_cpus();
+
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                msr_device_destroy(cpu);
        class_destroy(msr_class);
        __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
-       unregister_hotcpu_notifier(&msr_class_cpu_notifier);
-       put_online_cpus();
+       __unregister_hotcpu_notifier(&msr_class_cpu_notifier);
+       cpu_notifier_register_done();
 }
 
 module_init(msr_init);
index 9ea287666c6559abaa15a090ab37a0be0eaef1ba..8b3b3eb3cead2dffbdd20d6dd4632595e723a117 100644 (file)
@@ -348,9 +348,13 @@ static int __init vsyscall_init(void)
 {
        BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
 
+       cpu_notifier_register_begin();
+
        on_each_cpu(cpu_vsyscall_init, NULL, 1);
        /* notifier priority > KVM */
-       hotcpu_notifier(cpu_vsyscall_notifier, 30);
+       __hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
+       cpu_notifier_register_done();
 
        return 0;
 }
index d1c55f8722c66cf6139222c3544c97700169c523..9d1b5cd4d34cc6f585b7f18a1ac2c5ba266c9b28 100644 (file)
@@ -5422,7 +5422,8 @@ static void kvm_timer_init(void)
        int cpu;
 
        max_tsc_khz = tsc_khz;
-       register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+
+       cpu_notifier_register_begin();
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
                struct cpufreq_policy policy;
@@ -5439,6 +5440,10 @@ static void kvm_timer_init(void)
        pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
+
+       __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+       cpu_notifier_register_done();
+
 }
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
index 6890d8498e0becb308244819265647e6d400513b..379e8bd0deeabf8bb1e839ea6bcb11f0bc1dd12b 100644 (file)
@@ -494,14 +494,19 @@ static int nmi_setup(void)
        if (err)
                goto fail;
 
+       cpu_notifier_register_begin();
+
+       /* Use get/put_online_cpus() to protect 'nmi_enabled' */
        get_online_cpus();
-       register_cpu_notifier(&oprofile_cpu_nb);
        nmi_enabled = 1;
        /* make nmi_enabled visible to the nmi handler: */
        smp_mb();
        on_each_cpu(nmi_cpu_setup, NULL, 1);
+       __register_cpu_notifier(&oprofile_cpu_nb);
        put_online_cpus();
 
+       cpu_notifier_register_done();
+
        return 0;
 fail:
        free_msrs();
@@ -512,12 +517,18 @@ static void nmi_shutdown(void)
 {
        struct op_msrs *msrs;
 
+       cpu_notifier_register_begin();
+
+       /* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
        get_online_cpus();
-       unregister_cpu_notifier(&oprofile_cpu_nb);
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        nmi_enabled = 0;
        ctr_running = 0;
+       __unregister_cpu_notifier(&oprofile_cpu_nb);
        put_online_cpus();
+
+       cpu_notifier_register_done();
+
        /* make variables visible to the nmi handler: */
        smp_mb();
        unregister_nmi_handler(NMI_LOCAL, "oprofile");
index a313a7fb6b862e26b893ad8f5058e8767bf146c1..e88f4c53d7f6b41d4268b4eb51fad5badd514c2b 100644 (file)
@@ -370,10 +370,13 @@ static int __init pci_io_ecs_init(void)
        if (early_pci_allowed())
                pci_enable_pci_io_ecs();
 
-       register_cpu_notifier(&amd_cpu_notifier);
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
                               (void *)(long)cpu);
+       __register_cpu_notifier(&amd_cpu_notifier);
+       cpu_notifier_register_done();
+
        pci_probe |= PCI_HAS_IO_ECS;
 
        return 0;
index ad9d177626640377de6e517fe6d62db093039166..bbcbd3c4392689ef5022af42d0a09bb504be7253 100644 (file)
@@ -160,16 +160,20 @@ static int topology_cpu_callback(struct notifier_block *nfb,
 static int topology_sysfs_init(void)
 {
        int cpu;
-       int rc;
+       int rc = 0;
+
+       cpu_notifier_register_begin();
 
        for_each_online_cpu(cpu) {
                rc = topology_add_dev(cpu);
                if (rc)
-                       return rc;
+                       goto out;
        }
-       hotcpu_notifier(topology_cpu_callback, 0);
+       __hotcpu_notifier(topology_cpu_callback, 0);
 
-       return 0;
+out:
+       cpu_notifier_register_done();
+       return rc;
 }
 
 device_initcall(topology_sysfs_init);
index b3eb582d6a6f1956bc870067188bb03a090fd30b..ad357254172890ff4170c2a32634f57ad831b26e 100644 (file)
@@ -56,14 +56,19 @@ static struct notifier_block dummy_timer_cpu_nb = {
 
 static int __init dummy_timer_register(void)
 {
-       int err = register_cpu_notifier(&dummy_timer_cpu_nb);
+       int err = 0;
+
+       cpu_notifier_register_begin();
+       err = __register_cpu_notifier(&dummy_timer_cpu_nb);
        if (err)
-               return err;
+               goto out;
 
        /* We won't get a call on the boot CPU, so register immediately */
        if (num_possible_cpus() > 1)
                dummy_timer_setup();
 
-       return 0;
+out:
+       cpu_notifier_register_done();
+       return err;
 }
 early_initcall(dummy_timer_register);
index 822ca03a87f796ae321cb9c2bd54ffe7380b5581..d5eaedbe464f873e5a32978ac6cf86eb70169188 100644 (file)
@@ -906,15 +906,16 @@ static void __init acpi_cpufreq_boost_init(void)
 
                acpi_cpufreq_driver.boost_supported = true;
                acpi_cpufreq_driver.boost_enabled = boost_state(0);
-               get_online_cpus();
+
+               cpu_notifier_register_begin();
 
                /* Force all MSRs to the same value */
                boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
                               cpu_online_mask);
 
-               register_cpu_notifier(&boost_nb);
+               __register_cpu_notifier(&boost_nb);
 
-               put_online_cpus();
+               cpu_notifier_register_done();
        }
 }
 
index f31bc4c4864411ae8bd3267d27575c5444ac71e1..6d02e3b063756f6225078df7e00f981478ef5f45 100644 (file)
@@ -810,20 +810,20 @@ static int __init coretemp_init(void)
        if (err)
                goto exit;
 
-       get_online_cpus();
+       cpu_notifier_register_begin();
        for_each_online_cpu(i)
                get_core_online(i);
 
 #ifndef CONFIG_HOTPLUG_CPU
        if (list_empty(&pdev_list)) {
-               put_online_cpus();
+               cpu_notifier_register_done();
                err = -ENODEV;
                goto exit_driver_unreg;
        }
 #endif
 
-       register_hotcpu_notifier(&coretemp_cpu_notifier);
-       put_online_cpus();
+       __register_hotcpu_notifier(&coretemp_cpu_notifier);
+       cpu_notifier_register_done();
        return 0;
 
 #ifndef CONFIG_HOTPLUG_CPU
@@ -838,8 +838,8 @@ static void __exit coretemp_exit(void)
 {
        struct pdev_entry *p, *n;
 
-       get_online_cpus();
-       unregister_hotcpu_notifier(&coretemp_cpu_notifier);
+       cpu_notifier_register_begin();
+       __unregister_hotcpu_notifier(&coretemp_cpu_notifier);
        mutex_lock(&pdev_list_mutex);
        list_for_each_entry_safe(p, n, &pdev_list, list) {
                platform_device_unregister(p->pdev);
@@ -847,7 +847,7 @@ static void __exit coretemp_exit(void)
                kfree(p);
        }
        mutex_unlock(&pdev_list_mutex);
-       put_online_cpus();
+       cpu_notifier_register_done();
        platform_driver_unregister(&coretemp_driver);
 }
 
index 38944e94f65fbd27692100d4d78b2868f13a2982..8df43c51de2c2d14434533cff15bc371f3d3a245 100644 (file)
@@ -319,7 +319,7 @@ static int __init via_cputemp_init(void)
        if (err)
                goto exit;
 
-       get_online_cpus();
+       cpu_notifier_register_begin();
        for_each_online_cpu(i) {
                struct cpuinfo_x86 *c = &cpu_data(i);
 
@@ -339,14 +339,14 @@ static int __init via_cputemp_init(void)
 
 #ifndef CONFIG_HOTPLUG_CPU
        if (list_empty(&pdev_list)) {
-               put_online_cpus();
+               cpu_notifier_register_done();
                err = -ENODEV;
                goto exit_driver_unreg;
        }
 #endif
 
-       register_hotcpu_notifier(&via_cputemp_cpu_notifier);
-       put_online_cpus();
+       __register_hotcpu_notifier(&via_cputemp_cpu_notifier);
+       cpu_notifier_register_done();
        return 0;
 
 #ifndef CONFIG_HOTPLUG_CPU
@@ -361,8 +361,8 @@ static void __exit via_cputemp_exit(void)
 {
        struct pdev_entry *p, *n;
 
-       get_online_cpus();
-       unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
+       cpu_notifier_register_begin();
+       __unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
        mutex_lock(&pdev_list_mutex);
        list_for_each_entry_safe(p, n, &pdev_list, list) {
                platform_device_unregister(p->pdev);
@@ -370,7 +370,7 @@ static void __exit via_cputemp_exit(void)
                kfree(p);
        }
        mutex_unlock(&pdev_list_mutex);
-       put_online_cpus();
+       cpu_notifier_register_done();
        platform_driver_unregister(&via_cputemp_driver);
 }
 
index 8e1939f564f4ae16f44d1f1b06e80744d303fa0b..51493ed4643b019a1cd80e423485a75f363b8dbe 100644 (file)
@@ -681,14 +681,19 @@ static int __init intel_idle_init(void)
        if (intel_idle_cpuidle_devices == NULL)
                return -ENOMEM;
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(i) {
                retval = intel_idle_cpu_init(i);
                if (retval) {
+                       cpu_notifier_register_done();
                        cpuidle_unregister_driver(&intel_idle_driver);
                        return retval;
                }
        }
-       register_cpu_notifier(&cpu_hotplug_notifier);
+       __register_cpu_notifier(&cpu_hotplug_notifier);
+
+       cpu_notifier_register_done();
 
        return 0;
 }
@@ -698,10 +703,13 @@ static void __exit intel_idle_exit(void)
        intel_idle_cpuidle_devices_uninit();
        cpuidle_unregister_driver(&intel_idle_driver);
 
+       cpu_notifier_register_begin();
 
        if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
                on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
-       unregister_cpu_notifier(&cpu_hotplug_notifier);
+       __unregister_cpu_notifier(&cpu_hotplug_notifier);
+
+       cpu_notifier_register_done();
 
        return;
 }
index 76f1c9357f39d2a285c6bd8af17eb2c4aac5dcad..9559829fb234de4088602ca6e942e8dd11bd43ff 100644 (file)
@@ -108,8 +108,8 @@ static void nmi_timer_shutdown(void)
        struct perf_event *event;
        int cpu;
 
-       get_online_cpus();
-       unregister_cpu_notifier(&nmi_timer_cpu_nb);
+       cpu_notifier_register_begin();
+       __unregister_cpu_notifier(&nmi_timer_cpu_nb);
        for_each_possible_cpu(cpu) {
                event = per_cpu(nmi_timer_events, cpu);
                if (!event)
@@ -119,7 +119,7 @@ static void nmi_timer_shutdown(void)
                perf_event_release_kernel(event);
        }
 
-       put_online_cpus();
+       cpu_notifier_register_done();
 }
 
 static int nmi_timer_setup(void)
@@ -132,20 +132,23 @@ static int nmi_timer_setup(void)
        do_div(period, HZ);
        nmi_timer_attr.sample_period = period;
 
-       get_online_cpus();
-       err = register_cpu_notifier(&nmi_timer_cpu_nb);
+       cpu_notifier_register_begin();
+       err = __register_cpu_notifier(&nmi_timer_cpu_nb);
        if (err)
                goto out;
+
        /* can't attach events to offline cpus: */
        for_each_online_cpu(cpu) {
                err = nmi_timer_start_cpu(cpu);
-               if (err)
-                       break;
+               if (err) {
+                       cpu_notifier_register_done();
+                       nmi_timer_shutdown();
+                       return err;
+               }
        }
-       if (err)
-               nmi_timer_shutdown();
+
 out:
-       put_online_cpus();
+       cpu_notifier_register_done();
        return err;
 }
 
index 61b51e17d932a5c81db81fd99f46c7411dcf79c6..d9a0770b6c73d24b65ae35fce4bf3ac2a7f43343 100644 (file)
@@ -1374,6 +1374,9 @@ static int __init rapl_init(void)
 
                return -ENODEV;
        }
+
+       cpu_notifier_register_begin();
+
        /* prevent CPU hotplug during detection */
        get_online_cpus();
        ret = rapl_detect_topology();
@@ -1385,20 +1388,23 @@ static int __init rapl_init(void)
                ret = -ENODEV;
                goto done;
        }
-       register_hotcpu_notifier(&rapl_cpu_notifier);
+       __register_hotcpu_notifier(&rapl_cpu_notifier);
 done:
        put_online_cpus();
+       cpu_notifier_register_done();
 
        return ret;
 }
 
 static void __exit rapl_exit(void)
 {
+       cpu_notifier_register_begin();
        get_online_cpus();
-       unregister_hotcpu_notifier(&rapl_cpu_notifier);
+       __unregister_hotcpu_notifier(&rapl_cpu_notifier);
        rapl_unregister_powercap();
        rapl_cleanup_data();
        put_online_cpus();
+       cpu_notifier_register_done();
 }
 
 module_init(rapl_init);
index 6287f6a8b79d6c37b3b93d3509c11350c57d29bd..1d41f4b9114f8253e780d279799dad0ac0d27e04 100644 (file)
@@ -2592,12 +2592,16 @@ static int __init bnx2fc_mod_init(void)
                spin_lock_init(&p->fp_work_lock);
        }
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(cpu) {
                bnx2fc_percpu_thread_create(cpu);
        }
 
        /* Initialize per CPU interrupt thread */
-       register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+       __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+       cpu_notifier_register_done();
 
        cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
 
@@ -2662,13 +2666,17 @@ static void __exit bnx2fc_mod_exit(void)
        if (l2_thread)
                kthread_stop(l2_thread);
 
-       unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+       cpu_notifier_register_begin();
 
        /* Destroy per cpu threads */
        for_each_online_cpu(cpu) {
                bnx2fc_percpu_thread_destroy(cpu);
        }
 
+       __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+       cpu_notifier_register_done();
+
        destroy_workqueue(bnx2fc_wq);
        /*
         * detach from scsi transport
index 34c294b42c84e0353d4049737a72c9d3b57dc61a..80c03b452d61960cbe3df5d198fc409e7c49a9e9 100644 (file)
@@ -537,11 +537,15 @@ static int __init bnx2i_mod_init(void)
                p->iothread = NULL;
        }
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(cpu)
                bnx2i_percpu_thread_create(cpu);
 
        /* Initialize per CPU interrupt thread */
-       register_hotcpu_notifier(&bnx2i_cpu_notifier);
+       __register_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+       cpu_notifier_register_done();
 
        return 0;
 
@@ -581,11 +585,15 @@ static void __exit bnx2i_mod_exit(void)
        }
        mutex_unlock(&bnx2i_dev_lock);
 
-       unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
+       cpu_notifier_register_begin();
 
        for_each_online_cpu(cpu)
                bnx2i_percpu_thread_destroy(cpu);
 
+       __unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+       cpu_notifier_register_done();
+
        iscsi_unregister_transport(&bnx2i_iscsi_transport);
        cnic_unregister_driver(CNIC_ULP_ISCSI);
 }
index f3170008ae71b9b84de3c05b443a600d96e4396d..d5e105b173f0cf121894fcb5105a5afedadc16d5 100644 (file)
@@ -2633,14 +2633,18 @@ static int __init fcoe_init(void)
                skb_queue_head_init(&p->fcoe_rx_list);
        }
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(cpu)
                fcoe_percpu_thread_create(cpu);
 
        /* Initialize per CPU interrupt thread */
-       rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
+       rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
        if (rc)
                goto out_free;
 
+       cpu_notifier_register_done();
+
        /* Setup link change notification */
        fcoe_dev_setup();
 
@@ -2655,6 +2659,9 @@ out_free:
        for_each_online_cpu(cpu) {
                fcoe_percpu_thread_destroy(cpu);
        }
+
+       cpu_notifier_register_done();
+
        mutex_unlock(&fcoe_config_mutex);
        destroy_workqueue(fcoe_wq);
        return rc;
@@ -2687,11 +2694,15 @@ static void __exit fcoe_exit(void)
        }
        rtnl_unlock();
 
-       unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+       cpu_notifier_register_begin();
 
        for_each_online_cpu(cpu)
                fcoe_percpu_thread_destroy(cpu);
 
+       __unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+
+       cpu_notifier_register_done();
+
        mutex_unlock(&fcoe_config_mutex);
 
        /*
index 081fd7e6a9f070c683deaea799437a2e459b94f4..9ea3d9d49ffc55d4fc8c9679a5e3e88e7e5549f4 100644 (file)
@@ -590,12 +590,12 @@ static int __init pkg_temp_thermal_init(void)
        platform_thermal_package_rate_control =
                        pkg_temp_thermal_platform_thermal_rate_control;
 
-       get_online_cpus();
+       cpu_notifier_register_begin();
        for_each_online_cpu(i)
                if (get_core_online(i))
                        goto err_ret;
-       register_hotcpu_notifier(&pkg_temp_thermal_notifier);
-       put_online_cpus();
+       __register_hotcpu_notifier(&pkg_temp_thermal_notifier);
+       cpu_notifier_register_done();
 
        pkg_temp_debugfs_init(); /* Don't care if fails */
 
@@ -604,7 +604,7 @@ static int __init pkg_temp_thermal_init(void)
 err_ret:
        for_each_online_cpu(i)
                put_core_offline(i);
-       put_online_cpus();
+       cpu_notifier_register_done();
        kfree(pkg_work_scheduled);
        platform_thermal_package_notify = NULL;
        platform_thermal_package_rate_control = NULL;
@@ -617,8 +617,8 @@ static void __exit pkg_temp_thermal_exit(void)
        struct phy_dev_entry *phdev, *n;
        int i;
 
-       get_online_cpus();
-       unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
+       cpu_notifier_register_begin();
+       __unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
        mutex_lock(&phy_dev_list_mutex);
        list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
                /* Retore old MSR value for package thermal interrupt */
@@ -636,7 +636,7 @@ static void __exit pkg_temp_thermal_exit(void)
        for_each_online_cpu(i)
                cancel_delayed_work_sync(
                        &per_cpu(pkg_temp_thermal_threshold_work, i));
-       put_online_cpus();
+       cpu_notifier_register_done();
 
        kfree(pkg_work_scheduled);
 
index 461208831428e4da609606282af2b56f4c3c81f4..4baf2d788920484f5f9fe8d752d9f51ecf7b63fb 100644 (file)
@@ -708,10 +708,13 @@ static int __init octeon_wdt_init(void)
 
        cpumask_clear(&irq_enabled_cpus);
 
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                octeon_wdt_setup_interrupt(cpu);
 
-       register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+       __register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+       cpu_notifier_register_done();
+
 out:
        return ret;
 }
@@ -725,7 +728,8 @@ static void __exit octeon_wdt_cleanup(void)
 
        misc_deregister(&octeon_wdt_miscdev);
 
-       unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+       cpu_notifier_register_begin();
+       __unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
 
        for_each_online_cpu(cpu) {
                int core = cpu2core(cpu);
@@ -734,6 +738,9 @@ static void __exit octeon_wdt_cleanup(void)
                /* Free the interrupt handler */
                free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
        }
+
+       cpu_notifier_register_done();
+
        /*
         * Disable the boot-bus memory, the code it points to is soon
         * to go missing.
index 61a6ac8fa8fc7ab00dcc7c33cea47981f2509d4b..b7a506f2bb144e1c2e59b0f84c4c736d90dd830b 100644 (file)
@@ -604,19 +604,29 @@ static void __init balloon_add_region(unsigned long start_pfn,
        }
 }
 
+static int alloc_balloon_scratch_page(int cpu)
+{
+       if (per_cpu(balloon_scratch_page, cpu) != NULL)
+               return 0;
+
+       per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+       if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+               pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+
 static int balloon_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
 {
        int cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
-               if (per_cpu(balloon_scratch_page, cpu) != NULL)
-                       break;
-               per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-               if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-                       pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+               if (alloc_balloon_scratch_page(cpu))
                        return NOTIFY_BAD;
-               }
                break;
        default:
                break;
@@ -636,15 +646,17 @@ static int __init balloon_init(void)
                return -ENODEV;
 
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-               for_each_online_cpu(cpu)
-               {
-                       per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-                       if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-                               pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+               register_cpu_notifier(&balloon_cpu_notifier);
+
+               get_online_cpus();
+               for_each_online_cpu(cpu) {
+                       if (alloc_balloon_scratch_page(cpu)) {
+                               put_online_cpus();
+                               unregister_cpu_notifier(&balloon_cpu_notifier);
                                return -ENOMEM;
                        }
                }
-               register_cpu_notifier(&balloon_cpu_notifier);
+               put_online_cpus();
        }
 
        pr_info("Initialising balloon driver\n");
index 03e962e23eaf65aad5d161c528ae6a43cdc0ffed..81887120395c81347c6a74ad3c92d847f3c4c90d 100644 (file)
@@ -115,26 +115,46 @@ enum {
                { .notifier_call = fn, .priority = pri };       \
        register_cpu_notifier(&fn##_nb);                        \
 }
+
+#define __cpu_notifier(fn, pri) {                              \
+       static struct notifier_block fn##_nb =                  \
+               { .notifier_call = fn, .priority = pri };       \
+       __register_cpu_notifier(&fn##_nb);                      \
+}
 #else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
 #define cpu_notifier(fn, pri)  do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri)        do { (void)(fn); } while (0)
 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern int register_cpu_notifier(struct notifier_block *nb);
+extern int __register_cpu_notifier(struct notifier_block *nb);
 extern void unregister_cpu_notifier(struct notifier_block *nb);
+extern void __unregister_cpu_notifier(struct notifier_block *nb);
 #else
 
 #ifndef MODULE
 extern int register_cpu_notifier(struct notifier_block *nb);
+extern int __register_cpu_notifier(struct notifier_block *nb);
 #else
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
        return 0;
 }
+
+static inline int __register_cpu_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
 #endif
 
 static inline void unregister_cpu_notifier(struct notifier_block *nb)
 {
 }
+
+static inline void __unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
 #endif
 
 int cpu_up(unsigned int cpu);
@@ -142,19 +162,32 @@ void notify_cpu_starting(unsigned int cpu);
 extern void cpu_maps_update_begin(void);
 extern void cpu_maps_update_done(void);
 
+#define cpu_notifier_register_begin    cpu_maps_update_begin
+#define cpu_notifier_register_done     cpu_maps_update_done
+
 #else  /* CONFIG_SMP */
 
 #define cpu_notifier(fn, pri)  do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri)        do { (void)(fn); } while (0)
 
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
        return 0;
 }
 
+static inline int __register_cpu_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
 static inline void unregister_cpu_notifier(struct notifier_block *nb)
 {
 }
 
+static inline void __unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+
 static inline void cpu_maps_update_begin(void)
 {
 }
@@ -163,6 +196,14 @@ static inline void cpu_maps_update_done(void)
 {
 }
 
+static inline void cpu_notifier_register_begin(void)
+{
+}
+
+static inline void cpu_notifier_register_done(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 extern struct bus_type cpu_subsys;
 
@@ -176,8 +217,11 @@ extern void put_online_cpus(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
 #define hotcpu_notifier(fn, pri)       cpu_notifier(fn, pri)
+#define __hotcpu_notifier(fn, pri)     __cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)   register_cpu_notifier(nb)
+#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+#define __unregister_hotcpu_notifier(nb)       __unregister_cpu_notifier(nb)
 void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
@@ -190,9 +234,12 @@ static inline void cpu_hotplug_done(void) {}
 #define cpu_hotplug_disable()  do { } while (0)
 #define cpu_hotplug_enable()   do { } while (0)
 #define hotcpu_notifier(fn, pri)       do { (void)(fn); } while (0)
+#define __hotcpu_notifier(fn, pri)     do { (void)(fn); } while (0)
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotcpu_notifier(nb)   ({ (void)(nb); 0; })
+#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
 #define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
+#define __unregister_hotcpu_notifier(nb)       ({ (void)(nb); })
 #endif         /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM_SLEEP_SMP
index e56b07f5c9b67f0119d874b60a8a6bed9fda7e99..3356abcfff184e707eccb08d6f99042a8fd51acc 100644 (file)
@@ -835,6 +835,8 @@ do {                                                                        \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        unsigned long cpu = smp_processor_id();                         \
        unsigned long flags;                                            \
+                                                                       \
+       cpu_notifier_register_begin();                                  \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)cpu);                            \
        local_irq_save(flags);                                          \
@@ -843,9 +845,21 @@ do {                                                                       \
        local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)cpu);                            \
-       register_cpu_notifier(&fn##_nb);                                \
+       __register_cpu_notifier(&fn##_nb);                              \
+       cpu_notifier_register_done();                                   \
 } while (0)
 
+/*
+ * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
+ * callback for already online CPUs.
+ */
+#define __perf_cpu_notifier(fn)                                                \
+do {                                                                   \
+       static struct notifier_block fn##_nb =                          \
+               { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
+                                                                       \
+       __register_cpu_notifier(&fn##_nb);                              \
+} while (0)
 
 struct perf_pmu_events_attr {
        struct device_attribute attr;
index deff2e693766997a259b4b3ac2fc4f789e554f1d..a9e710eef0e2543f063ee8a1c06952f2fb3e0891 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/gfp.h>
 #include <linux/suspend.h>
+#include <linux/lockdep.h>
 
 #include "smpboot.h"
 
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
 /*
- * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ * The following two APIs (cpu_maps_update_begin/done) must be used when
+ * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
+ * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
+ * hotplug callback (un)registration performed using __register_cpu_notifier()
+ * or __unregister_cpu_notifier().
  */
 void cpu_maps_update_begin(void)
 {
        mutex_lock(&cpu_add_remove_lock);
 }
+EXPORT_SYMBOL(cpu_notifier_register_begin);
 
 void cpu_maps_update_done(void)
 {
        mutex_unlock(&cpu_add_remove_lock);
 }
+EXPORT_SYMBOL(cpu_notifier_register_done);
 
 static RAW_NOTIFIER_HEAD(cpu_chain);
 
@@ -57,17 +63,30 @@ static struct {
         * an ongoing cpu hotplug operation.
         */
        int refcount;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map dep_map;
+#endif
 } cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       .dep_map = {.name = "cpu_hotplug.lock" },
+#endif
 };
 
+/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
+#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
+#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
+#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
+
 void get_online_cpus(void)
 {
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
+       cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
@@ -87,6 +106,7 @@ void put_online_cpus(void)
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
+       cpuhp_lock_release();
 
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -117,6 +137,7 @@ void cpu_hotplug_begin(void)
 {
        cpu_hotplug.active_writer = current;
 
+       cpuhp_lock_acquire();
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
@@ -131,6 +152,7 @@ void cpu_hotplug_done(void)
 {
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
+       cpuhp_lock_release();
 }
 
 /*
@@ -166,6 +188,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
        return ret;
 }
 
+int __ref __register_cpu_notifier(struct notifier_block *nb)
+{
+       return raw_notifier_chain_register(&cpu_chain, nb);
+}
+
 static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
 {
@@ -189,6 +216,7 @@ static void cpu_notify_nofail(unsigned long val, void *v)
        BUG_ON(cpu_notify(val, v));
 }
 EXPORT_SYMBOL(register_cpu_notifier);
+EXPORT_SYMBOL(__register_cpu_notifier);
 
 void __ref unregister_cpu_notifier(struct notifier_block *nb)
 {
@@ -198,6 +226,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
+void __ref __unregister_cpu_notifier(struct notifier_block *nb)
+{
+       raw_notifier_chain_unregister(&cpu_chain, nb);
+}
+EXPORT_SYMBOL(__unregister_cpu_notifier);
+
 /**
  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
  * @cpu: a CPU id
index 1b266dbe755a983e0bca02bb50ca1ab47419488d..cb980f0c731b72ef3df3ff6b9afa2f6f44d15b9c 100644 (file)
@@ -591,18 +591,28 @@ out_cleanup:
 int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
 {
        struct proc_dir_entry *entry;
+       int err = 0;
 
        if (!prof_on)
                return 0;
-       if (create_hash_tables())
-               return -ENOMEM;
+
+       cpu_notifier_register_begin();
+
+       if (create_hash_tables()) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        entry = proc_create("profile", S_IWUSR | S_IRUGO,
                            NULL, &proc_profile_operations);
        if (!entry)
-               return 0;
+               goto out;
        proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
-       hotcpu_notifier(profile_cpu_callback, 0);
-       return 0;
+       __hotcpu_notifier(profile_cpu_callback, 0);
+
+out:
+       cpu_notifier_register_done();
+       return err;
 }
 subsys_initcall(create_proc_profile);
 #endif /* CONFIG_PROC_FS */
index fc4da2d97f9b6e280b2e29a525e478958cb495d4..c634868c2921ca39837c3a36773c36fa4a3273d5 100644 (file)
@@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
         * In that off case, we need to allocate for all possible cpus.
         */
 #ifdef CONFIG_HOTPLUG_CPU
-       get_online_cpus();
+       cpu_notifier_register_begin();
        cpumask_copy(buffer->cpumask, cpu_online_mask);
 #else
        cpumask_copy(buffer->cpumask, cpu_possible_mask);
@@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 #ifdef CONFIG_HOTPLUG_CPU
        buffer->cpu_notify.notifier_call = rb_cpu_notify;
        buffer->cpu_notify.priority = 0;
-       register_cpu_notifier(&buffer->cpu_notify);
+       __register_cpu_notifier(&buffer->cpu_notify);
+       cpu_notifier_register_done();
 #endif
 
-       put_online_cpus();
        mutex_init(&buffer->mutex);
 
        return buffer;
@@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 
  fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
-       put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+       cpu_notifier_register_done();
+#endif
 
  fail_free_buffer:
        kfree(buffer);
@@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
        int cpu;
 
-       get_online_cpus();
-
 #ifdef CONFIG_HOTPLUG_CPU
-       unregister_cpu_notifier(&buffer->cpu_notify);
+       cpu_notifier_register_begin();
+       __unregister_cpu_notifier(&buffer->cpu_notify);
 #endif
 
        for_each_buffer_cpu(buffer, cpu)
                rb_free_cpu_buffer(buffer->buffers[cpu]);
 
-       put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+       cpu_notifier_register_done();
+#endif
 
        kfree(buffer->buffers);
        free_cpumask_var(buffer->cpumask);
index 197b4c4a95879832e29e7b933b70c2f4f6e01d17..302dd076b8bf47bfb13a7925166b1b1f4e0dc5eb 100644 (file)
@@ -1298,14 +1298,14 @@ static int __init setup_vmstat(void)
 #ifdef CONFIG_SMP
        int cpu;
 
-       register_cpu_notifier(&vmstat_notifier);
+       cpu_notifier_register_begin();
+       __register_cpu_notifier(&vmstat_notifier);
 
-       get_online_cpus();
        for_each_online_cpu(cpu) {
                start_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
        }
-       put_online_cpus();
+       cpu_notifier_register_done();
 #endif
 #ifdef CONFIG_PROC_FS
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
index c03ca5e9fe15c8b9725db0ac41b6ba78f64b0e5d..36b4591a7a2d3b2eca7111b26bee3efa19c7c5c5 100644 (file)
@@ -814,21 +814,32 @@ static void zs_exit(void)
 {
        int cpu;
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(cpu)
                zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
-       unregister_cpu_notifier(&zs_cpu_nb);
+       __unregister_cpu_notifier(&zs_cpu_nb);
+
+       cpu_notifier_register_done();
 }
 
 static int zs_init(void)
 {
        int cpu, ret;
 
-       register_cpu_notifier(&zs_cpu_nb);
+       cpu_notifier_register_begin();
+
+       __register_cpu_notifier(&zs_cpu_nb);
        for_each_online_cpu(cpu) {
                ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-               if (notifier_to_errno(ret))
+               if (notifier_to_errno(ret)) {
+                       cpu_notifier_register_done();
                        goto fail;
+               }
        }
+
+       cpu_notifier_register_done();
+
        return 0;
 fail:
        zs_exit();
index e55bab9dc41f81ab1b6384710e918f3839e0c936..d7337fbf66053219ab0e72e5e9b2c0b7e17d2bb7 100644 (file)
@@ -387,18 +387,18 @@ static int zswap_cpu_init(void)
 {
        unsigned long cpu;
 
-       get_online_cpus();
+       cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
                        goto cleanup;
-       register_cpu_notifier(&zswap_cpu_notifier_block);
-       put_online_cpus();
+       __register_cpu_notifier(&zswap_cpu_notifier_block);
+       cpu_notifier_register_done();
        return 0;
 
 cleanup:
        for_each_online_cpu(cpu)
                __zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
-       put_online_cpus();
+       cpu_notifier_register_done();
        return -ENOMEM;
 }
 
index 31cfb365e0c689ffa528bf2f96072c6bcbc82799..a0348fde1fdfe89844352702565d35ef8d706f24 100644 (file)
@@ -455,6 +455,8 @@ int flow_cache_init(struct net *net)
        if (!fc->percpu)
                return -ENOMEM;
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(i) {
                if (flow_cache_cpu_prepare(fc, i))
                        goto err;
@@ -462,7 +464,9 @@ int flow_cache_init(struct net *net)
        fc->hotcpu_notifier = (struct notifier_block){
                .notifier_call = flow_cache_cpu,
        };
-       register_hotcpu_notifier(&fc->hotcpu_notifier);
+       __register_hotcpu_notifier(&fc->hotcpu_notifier);
+
+       cpu_notifier_register_done();
 
        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
@@ -478,6 +482,8 @@ err:
                fcp->hash_table = NULL;
        }
 
+       cpu_notifier_register_done();
+
        free_percpu(fc->percpu);
        fc->percpu = NULL;
 
index cd5b8ec9be0459db10432aad57836f96b3388d63..79a0ce95799fb26e7b96402afef67f4085dd10bf 100644 (file)
@@ -621,6 +621,42 @@ static void iucv_disable(void)
        put_online_cpus();
 }
 
+static void free_iucv_data(int cpu)
+{
+       kfree(iucv_param_irq[cpu]);
+       iucv_param_irq[cpu] = NULL;
+       kfree(iucv_param[cpu]);
+       iucv_param[cpu] = NULL;
+       kfree(iucv_irq_data[cpu]);
+       iucv_irq_data[cpu] = NULL;
+}
+
+static int alloc_iucv_data(int cpu)
+{
+       /* Note: GFP_DMA used to get memory below 2G */
+       iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
+                            GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+       if (!iucv_irq_data[cpu])
+               goto out_free;
+
+       /* Allocate parameter blocks. */
+       iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
+                         GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+       if (!iucv_param[cpu])
+               goto out_free;
+
+       iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+                         GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+       if (!iucv_param_irq[cpu])
+               goto out_free;
+
+       return 0;
+
+out_free:
+       free_iucv_data(cpu);
+       return -ENOMEM;
+}
+
 static int iucv_cpu_notify(struct notifier_block *self,
                                     unsigned long action, void *hcpu)
 {
@@ -630,38 +666,14 @@ static int iucv_cpu_notify(struct notifier_block *self,
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
-                                       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-               if (!iucv_irq_data[cpu])
-                       return notifier_from_errno(-ENOMEM);
-
-               iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                    GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-               if (!iucv_param[cpu]) {
-                       kfree(iucv_irq_data[cpu]);
-                       iucv_irq_data[cpu] = NULL;
+               if (alloc_iucv_data(cpu))
                        return notifier_from_errno(-ENOMEM);
-               }
-               iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-               if (!iucv_param_irq[cpu]) {
-                       kfree(iucv_param[cpu]);
-                       iucv_param[cpu] = NULL;
-                       kfree(iucv_irq_data[cpu]);
-                       iucv_irq_data[cpu] = NULL;
-                       return notifier_from_errno(-ENOMEM);
-               }
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               kfree(iucv_param_irq[cpu]);
-               iucv_param_irq[cpu] = NULL;
-               kfree(iucv_param[cpu]);
-               iucv_param[cpu] = NULL;
-               kfree(iucv_irq_data[cpu]);
-               iucv_irq_data[cpu] = NULL;
+               free_iucv_data(cpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
@@ -2025,33 +2037,20 @@ static int __init iucv_init(void)
                goto out_int;
        }
 
-       for_each_online_cpu(cpu) {
-               /* Note: GFP_DMA used to get memory below 2G */
-               iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
-                                    GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-               if (!iucv_irq_data[cpu]) {
-                       rc = -ENOMEM;
-                       goto out_free;
-               }
+       cpu_notifier_register_begin();
 
-               /* Allocate parameter blocks. */
-               iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-               if (!iucv_param[cpu]) {
-                       rc = -ENOMEM;
-                       goto out_free;
-               }
-               iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-               if (!iucv_param_irq[cpu]) {
+       for_each_online_cpu(cpu) {
+               if (alloc_iucv_data(cpu)) {
                        rc = -ENOMEM;
                        goto out_free;
                }
-
        }
-       rc = register_hotcpu_notifier(&iucv_cpu_notifier);
+       rc = __register_hotcpu_notifier(&iucv_cpu_notifier);
        if (rc)
                goto out_free;
+
+       cpu_notifier_register_done();
+
        rc = register_reboot_notifier(&iucv_reboot_notifier);
        if (rc)
                goto out_cpu;
@@ -2069,16 +2068,14 @@ static int __init iucv_init(void)
 out_reboot:
        unregister_reboot_notifier(&iucv_reboot_notifier);
 out_cpu:
-       unregister_hotcpu_notifier(&iucv_cpu_notifier);
+       cpu_notifier_register_begin();
+       __unregister_hotcpu_notifier(&iucv_cpu_notifier);
 out_free:
-       for_each_possible_cpu(cpu) {
-               kfree(iucv_param_irq[cpu]);
-               iucv_param_irq[cpu] = NULL;
-               kfree(iucv_param[cpu]);
-               iucv_param[cpu] = NULL;
-               kfree(iucv_irq_data[cpu]);
-               iucv_irq_data[cpu] = NULL;
-       }
+       for_each_possible_cpu(cpu)
+               free_iucv_data(cpu);
+
+       cpu_notifier_register_done();
+
        root_device_unregister(iucv_root);
 out_int:
        unregister_external_interrupt(0x4000, iucv_external_interrupt);
@@ -2105,15 +2102,11 @@ static void __exit iucv_exit(void)
                kfree(p);
        spin_unlock_irq(&iucv_queue_lock);
        unregister_reboot_notifier(&iucv_reboot_notifier);
-       unregister_hotcpu_notifier(&iucv_cpu_notifier);
-       for_each_possible_cpu(cpu) {
-               kfree(iucv_param_irq[cpu]);
-               iucv_param_irq[cpu] = NULL;
-               kfree(iucv_param[cpu]);
-               iucv_param[cpu] = NULL;
-               kfree(iucv_irq_data[cpu]);
-               iucv_irq_data[cpu] = NULL;
-       }
+       cpu_notifier_register_begin();
+       __unregister_hotcpu_notifier(&iucv_cpu_notifier);
+       for_each_possible_cpu(cpu)
+               free_iucv_data(cpu);
+       cpu_notifier_register_done();
        root_device_unregister(iucv_root);
        bus_unregister(&iucv_bus);
        unregister_external_interrupt(0x4000, iucv_external_interrupt);