Merge remote-tracking branch 'mmc/mmc-next'
diff --git a/kernel/smp.c b/kernel/smp.c
index fe9f773d71146c82ad9c543ab0456490c9bebd0f..0564571dcdf726fab6832bf91417dfe2eff372fb 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -48,10 +48,13 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
                if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-                               cpu_to_node(cpu)))
+                               cpu_to_node(cpu))) {
+                       free_cpumask_var(cfd->cpumask);
                        return notifier_from_errno(-ENOMEM);
+               }
                cfd->csd = alloc_percpu(struct call_single_data);
                if (!cfd->csd) {
+                       free_cpumask_var(cfd->cpumask_ipi);
                        free_cpumask_var(cfd->cpumask);
                        return notifier_from_errno(-ENOMEM);
                }
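
The hunk above plugs two leaks on the CPU_UP_PREPARE error paths: each
allocation's failure branch now frees everything allocated before it, in
reverse order. A minimal sketch of the resulting pattern, pulled out of the
diff for readability (illustrative only; 'node' stands in for the
cpu_to_node(cpu) argument used above):

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, node))
                return notifier_from_errno(-ENOMEM);    /* nothing to undo yet */

        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, node)) {
                free_cpumask_var(cfd->cpumask);         /* undo 1st allocation */
                return notifier_from_errno(-ENOMEM);
        }

        cfd->csd = alloc_percpu(struct call_single_data);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask_ipi);     /* undo 2nd allocation */
                free_cpumask_var(cfd->cpumask);         /* undo 1st allocation */
                return notifier_from_errno(-ENOMEM);
        }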
@@ -186,25 +189,13 @@ void generic_smp_call_function_single_interrupt(void)
 
        while (!list_empty(&list)) {
                struct call_single_data *csd;
-               unsigned int csd_flags;
 
                csd = list_entry(list.next, struct call_single_data, list);
                list_del(&csd->list);
 
-               /*
-                * 'csd' can be invalid after this call if flags == 0
-                * (when called through generic_exec_single()),
-                * so save them away before making the call:
-                */
-               csd_flags = csd->flags;
-
                csd->func(csd->info);
 
-               /*
-                * Unlocked CSDs are valid through generic_exec_single():
-                */
-               if (csd_flags & CSD_FLAG_LOCK)
-                       csd_unlock(csd);
+               csd_unlock(csd);
        }
 }
 
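With the on-stack fallback gone from generic_exec_single(), every csd that
reaches this list holds CSD_FLAG_LOCK, so the interrupt handler can unlock
unconditionally instead of snapshotting csd->flags before the callback. For
context, a paraphrase of the lock/unlock helpers from the same file
(abridged; the in-tree versions carry longer comments, and csd_lock_wait()
is the spin-until-unlocked helper defined alongside them):

        static void csd_lock(struct call_single_data *csd)
        {
                csd_lock_wait(csd);     /* wait out any previous owner */
                csd->flags |= CSD_FLAG_LOCK;
                smp_mb();               /* keep the lock bit ordered before later func/info stores */
        }

        static void csd_unlock(struct call_single_data *csd)
        {
                WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
                smp_mb();               /* make sure the callback is done before release */
                csd->flags &= ~CSD_FLAG_LOCK;
        }
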
@@ -278,8 +269,6 @@ EXPORT_SYMBOL(smp_call_function_single);
  * @wait: If true, wait until function has completed.
  *
  * Returns 0 on success, else a negative status code (if no cpus were online).
- * Note that @wait will be implicitly turned on in case of allocation failures,
- * since we fall back to on-stack allocation.
  *
  * Selection preference:
  *     1) current cpu if in @mask
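
The note deleted above is stale once the on-stack fallback is removed:
smp_call_function_any() no longer turns @wait on behind the caller's back
when allocation fails. A hedged usage sketch under the updated contract
(my_func and my_info are made-up names for illustration):

        static void my_func(void *info)         /* hypothetical callback */
        {
                pr_info("ran on CPU %d\n", smp_processor_id());
        }

        /* Run my_func on one online CPU from 'mask', preferring the
         * current CPU; wait=1 blocks until my_func has returned: */
        int err = smp_call_function_any(mask, my_func, my_info, 1);
        if (err)
                return err;     /* negative errno: no online CPU in @mask */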
@@ -586,8 +575,10 @@ EXPORT_SYMBOL(on_each_cpu);
  *
  * If @wait is true, then returns once @func has returned.
  *
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.  The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                        void *info, bool wait)
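
The sentence added above documents the early-boot carve-out that the
irq-state fix in the next hunk makes safe. For illustration, a hedged
example of an ordinary call (setup_feature is a made-up callback name):

        static void setup_feature(void *unused) /* hypothetical per-CPU init */
        {
                /* per-CPU work; runs with IRQs disabled on the local CPU */
        }

        /* Invoke setup_feature on every online CPU and wait for all of
         * them to finish before continuing: */
        on_each_cpu_mask(cpu_online_mask, setup_feature, NULL, true);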
@@ -596,9 +587,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 
        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
-               local_irq_disable();
+               unsigned long flags;
+               local_irq_save(flags);
                func(info);
-               local_irq_enable();
+               local_irq_restore(flags);
        }
        put_cpu();
 }
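
The switch to local_irq_save()/local_irq_restore() is what makes the
early-boot exception documented above safe: a bare local_irq_enable() would
force interrupts on even for a caller that entered with them disabled. A
minimal sketch of the difference (illustrative, not the kernel code):

        /* Broken during early boot: unconditionally re-enables IRQs on
         * exit, even if the caller had them disabled on entry. */
        local_irq_disable();
        func(info);
        local_irq_enable();

        /* Correct: restores whatever IRQ state the caller arrived with. */
        unsigned long flags;

        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);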