CPU hotplug, smp: Flush any pending IPI callbacks before CPU offline
author     Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
           Thu, 22 May 2014 00:44:06 +0000 (10:44 +1000)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Thu, 22 May 2014 00:44:06 +0000 (10:44 +1000)
During CPU offline, in the stop-machine loop, we use two separate stages to
disable interrupts, to ensure that the CPU going offline doesn't receive any
new IPIs from the other CPUs after it has gone offline.
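
For reference, this two-stage sequence lives in the multi_cpu_stop() state
machine in kernel/stop_machine.c.  A simplified sketch of the states is
below; the MULTI_STOP_DISABLE_IRQ_INACTIVE name for the first stage is
assumed from the preceding patch in this series:

    enum multi_stop_state {
            MULTI_STOP_NONE,                 /* not yet started */
            MULTI_STOP_PREPARE,              /* all CPUs entered the loop */
            MULTI_STOP_DISABLE_IRQ_INACTIVE, /* non-hotplugged CPUs disable
                                                irqs first (assumed name) */
            MULTI_STOP_DISABLE_IRQ_ACTIVE,   /* then the outgoing CPU does */
            MULTI_STOP_RUN,                  /* run the stop callback */
            MULTI_STOP_EXIT,                 /* all done */
    };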

However, an IPI sent much earlier might arrive late on the target CPU
(possibly _after_ the CPU has gone offline) due to hardware latencies, and as
a result the smp-call-function callbacks queued on the outgoing CPU might
never get noticed (and hence never executed) at all.
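
As a concrete example, such a callback typically gets queued on the target
CPU's call_single_queue via the generic smp-call-function API.  A minimal
sketch (my_remote_func and 'cpu' are hypothetical):

    #include <linux/smp.h>

    static void my_remote_func(void *info)   /* hypothetical callback */
    {
            pr_info("ran on CPU %d\n", smp_processor_id());
    }

    /*
     * Queues a csd on 'cpu' and kicks it with an IPI (wait=0, so the
     * caller doesn't block).  If that IPI is delayed past the point
     * where 'cpu' goes offline, the queued callback is exactly the
     * pending work described above.
     */
    smp_call_function_single(cpu, my_remote_func, NULL, 0);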

This is somewhat theoretical, but in any case it makes sense to explicitly
loop through the call_single_queue and flush any pending callbacks before the
CPU goes completely offline.  So, flush the queued smp-call-function callbacks
in the MULTI_STOP_DISABLE_IRQ_ACTIVE stage, after disabling interrupts on the
active CPU.  This can be trivially achieved by invoking
generic_smp_call_function_single_interrupt() itself (and since the outgoing
CPU is still online at this point, we won't trigger the "IPI to offline CPU"
warning in that function, so it is safe to call here).
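
Concretely, the flush just drains the per-CPU call_single_queue llist and
runs each queued callback, which is what the IPI handler (shown in the
kernel/smp.c hunk below) already does; csd_unlock() is the existing helper
that releases the csd back to the sender:

    entry = llist_del_all(&__get_cpu_var(call_single_queue));
    entry = llist_reverse_order(entry);       /* restore FIFO order */

    llist_for_each_entry_safe(csd, csd_next, entry, llist) {
            csd->func(csd->info);             /* run the queued callback */
            csd_unlock(csd);                  /* release csd to the sender */
    }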

This way, all the queued callbacks are handled before the CPU goes offline,
and no new IPIs can be sent to the outgoing CPU at that point, because all
the other CPUs will be executing the stop-machine code with interrupts
disabled.

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Suggested-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/smp.h
kernel/smp.c
kernel/stop_machine.c

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 633f5edd7470c01518416fc93ca4b7d08b9d372c..e6b090d73c07f3c1c37d997d89c003e3da226a17 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -151,6 +151,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) {  }
 
+static inline void generic_smp_call_function_single_interrupt(void) { }
+
 #endif /* !SMP */
 
 /*
diff --git a/kernel/smp.c b/kernel/smp.c
index 306f8180b0d53165c960d7844295d8c6a4d6151a..b765167406b3652c9ddea9ee8c02fbef6ed4d97f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -177,9 +177,18 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
        return 0;
 }
 
-/*
- * Invoked by arch to handle an IPI for call function single. Must be
- * called from the arch with interrupts disabled.
+/**
+ * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
+ *
+ * Invoked by arch to handle an IPI for call function single.
+ *
+ * This is also invoked by a CPU about to go offline, to flush any pending
+ * smp-call-function callbacks queued on this CPU (including those for which
+ * the source CPU's IPIs might not have been received on this CPU yet).
+ * This ensures that all pending IPI callbacks are run before the CPU goes
+ * completely offline.
+ *
+ * Must be called with interrupts disabled.
  */
 void generic_smp_call_function_single_interrupt(void)
 {
@@ -187,6 +196,8 @@ void generic_smp_call_function_single_interrupt(void)
        struct call_single_data *csd, *csd_next;
        static bool warned;
 
+       WARN_ON(!irqs_disabled());
+
        entry = llist_del_all(&__get_cpu_var(call_single_queue));
        entry = llist_reverse_order(entry);
 
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 9486c3ad5f77710a75540ca783a68fe23b82e159..9973a6928a06662c60ba3acf5b7fd6616be0255d 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -21,6 +21,7 @@
 #include <linux/smpboot.h>
 #include <linux/atomic.h>
 #include <linux/lglock.h>
+#include <linux/smp.h>
 
 /*
  * Structure to determine completion condition and record errors.  May
@@ -223,6 +224,16 @@ static int multi_cpu_stop(void *data)
                                if (is_active) {
                                        local_irq_disable();
                                        hard_irq_disable();
+
+                                       /*
+                                        * IPIs (from the inactive CPUs) might
+                                        * arrive late due to hardware latencies.
+                                        * So flush out any pending IPI callbacks
+                                        * explicitly, to ensure that the outgoing
+                                        * CPU doesn't go offline with work still
+                                        * pending (during CPU hotplug).
+                                        */
+                                       generic_smp_call_function_single_interrupt();
                                }
                                break;
                        case MULTI_STOP_RUN: