/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
        struct list_head        queue;
        raw_spinlock_t          lock;
} call_function __cacheline_aligned_in_smp =
        {
                .queue          = LIST_HEAD_INIT(call_function.queue),
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
        };

enum {
        CSD_FLAG_LOCK           = 0x01,
};
struct call_function_data {
        struct call_single_data csd;
        atomic_t                refs;
        cpumask_var_t           cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
struct call_single_queue {
        struct list_head        list;
        raw_spinlock_t          lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                break;
#endif
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int i;

        for_each_possible_cpu(i) {
                struct call_single_queue *q = &per_cpu(call_single_queue, i);

                raw_spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->list);
        }

        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);

        return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting,
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
        while (data->flags & CSD_FLAG_LOCK)
                cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
        csd_lock_wait(data);
        data->flags = CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
        WARN_ON(!(data->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_mb();

        data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
        unsigned long flags;
        int ipi;

        raw_spin_lock_irqsave(&dst->lock, flags);
        ipi = list_empty(&dst->list);
        list_add_tail(&data->list, &dst->list);
        raw_spin_unlock_irqrestore(&dst->lock, flags);

        /*
         * The list addition should be visible before sending the IPI:
         * by the time the handler locks the list to pull the entry off,
         * the normal cache coherency rules implied by spinlocks make
         * the addition visible.
         *
         * If IPIs can go out of order with respect to the cache coherency
         * protocol on an architecture, sufficient synchronisation should
         * be added to arch code to make it appear to obey cache coherency
         * WRT locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (ipi)
                arch_send_call_function_single_ipi(cpu);

        if (wait)
                csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
        struct call_function_data *data;
        int cpu = smp_processor_id();

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(cpu));

        /*
         * Ensure entry is visible on call_function_queue after we have
         * entered the IPI. See comment in smp_call_function_many.
         * If we don't have this, then we may miss an entry on the list
         * and never get another IPI to process it.
         */
        smp_mb();

        /*
         * It's ok to walk the list under RCU here even though we may
         * delete 'data', since list_del_rcu() doesn't clear ->next.
         */
        list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                int refs;

                /*
                 * Since we walk the list without any locks, we might
                 * see an entry that was completed, removed from the
                 * list and is in the process of being reused.
                 *
                 * We must check that the cpu is in the cpumask before
                 * checking the refs, and both must be set before
                 * executing the callback on this cpu.
                 */
                if (!cpumask_test_cpu(cpu, data->cpumask))
                        continue;

                smp_rmb();

                if (atomic_read(&data->refs) == 0)
                        continue;

                if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
                        continue;

                data->csd.func(data->csd.info);

                refs = atomic_dec_return(&data->refs);
                if (refs)
                        continue;

                WARN_ON(!cpumask_empty(data->cpumask));

                raw_spin_lock(&call_function.lock);
                list_del_rcu(&data->csd.list);
                raw_spin_unlock(&call_function.lock);

                csd_unlock(&data->csd);
        }
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
        unsigned int data_flags;
        LIST_HEAD(list);

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(smp_processor_id()));

        raw_spin_lock(&q->lock);
        list_replace_init(&q->list, &list);
        raw_spin_unlock(&q->lock);

        while (!list_empty(&list)) {
                struct call_single_data *data;

                data = list_entry(list.next, struct call_single_data, list);
                list_del(&data->list);

                /*
                 * 'data' can be invalid after this call if flags == 0
                 * (when called through generic_exec_single()),
                 * so save the flags away before making the call:
                 */
                data_flags = data->flags;

                data->func(data->info);

                /*
                 * Unlocked CSDs are valid through generic_exec_single():
                 */
                if (data_flags & CSD_FLAG_LOCK)
                        csd_unlock(data);
        }
}
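
/*
 * Illustrative sketch, not part of the original file: this is roughly how
 * an architecture's IPI entry points dispatch to the two generic handlers
 * above. The function names are hypothetical; real architectures have
 * their own entry/exit bookkeeping, and both handlers must be called with
 * interrupts disabled.
 */
static void example_arch_call_function_ipi(void)
{
        /* raised by arch_send_call_function_ipi_mask() */
        generic_smp_call_function_interrupt();
}

static void example_arch_call_function_single_ipi(void)
{
        /* raised by arch_send_call_function_single_ipi() */
        generic_smp_call_function_single_interrupt();
}
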
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        struct call_single_data d = {
                .flags = 0,
        };
        unsigned long flags;
        int this_cpu;
        int err = 0;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        if (cpu == this_cpu) {
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        } else {
                if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
                        struct call_single_data *data = &d;

                        if (!wait)
                                data = &__get_cpu_var(csd_data);

                        csd_lock(data);

                        data->func = func;
                        data->info = info;
                        generic_exec_single(cpu, data, wait);
                } else {
                        err = -ENXIO;   /* CPU not online */
                }
        }

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
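
/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * smp_call_function_single(). The names example_store_cpu() and
 * example_call_single() are hypothetical; the pattern is just "pass a
 * fast, non-blocking callback plus an info pointer, optionally waiting
 * for it to complete".
 */
static void example_store_cpu(void *info)
{
        /* runs on the target cpu, with interrupts disabled */
        *(int *)info = smp_processor_id();
}

static int example_call_single(int target_cpu)
{
        int ran_on = -1;

        /* wait=1: the on-stack 'ran_on' stays valid until func has run */
        return smp_call_function_single(target_cpu, example_store_cpu,
                                        &ran_on, 1);
}
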
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
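
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller of smp_call_function_any(), reusing the example_store_cpu()
 * callback sketched above. The cpu actually chosen follows the selection
 * preference documented on the function.
 */
static int example_call_any(const struct cpumask *mask)
{
        int ran_on = -1;

        /* wait=1 so the on-stack 'ran_on' is safe to pass as info */
        return smp_call_function_any(mask, example_store_cpu, &ran_on, 1);
}
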
/*
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
                                int wait)
{
        unsigned int this_cpu;
        unsigned long flags;

        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                     && !oops_in_progress);

        if (cpu == this_cpu) {
                local_irq_save(flags);
                data->func(data->info);
                local_irq_restore(flags);
        } else {
                csd_lock(data);
                generic_exec_single(cpu, data, wait);
        }
        put_cpu();
}
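
/*
 * Illustrative sketch, not part of the original file: the doc comment
 * above mentions embedding the call_single_data in another structure.
 * A hypothetical driver might keep one csd per device and fire it
 * asynchronously (wait=0); csd_lock() inside __smp_call_function_single()
 * then serialises reuse of the embedded csd. 'struct example_dev' and the
 * helpers below are made-up names, and the device is assumed to be
 * zero-initialised so csd.flags starts out clear.
 */
struct example_dev {
        struct call_single_data csd;
        int                     pending;
};

static void example_dev_poke(void *info)
{
        struct example_dev *edev = info;

        edev->pending = 0;      /* runs on the chosen cpu */
}

static void example_dev_init(struct example_dev *edev)
{
        edev->csd.func = example_dev_poke;
        edev->csd.info = edev;
}

static void example_dev_kick(struct example_dev *edev, int cpu)
{
        edev->pending = 1;
        /* async: returns before example_dev_poke() has run on 'cpu' */
        __smp_call_function_single(cpu, &edev->csd, 0);
}
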
/*
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *data;
        unsigned long flags;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /* So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus? We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        data = &__get_cpu_var(cfd_data);
        csd_lock(&data->csd);
        BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

        data->csd.func = func;
        data->csd.info = info;
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, data->cpumask);

        /*
         * To ensure the interrupt handler gets a complete view,
         * we order the cpumask and refs writes and order the read
         * of them in the interrupt handler. In addition we may
         * only clear our own cpu bit from the mask.
         */
        smp_wmb();

        atomic_set(&data->refs, cpumask_weight(data->cpumask));

        raw_spin_lock_irqsave(&call_function.lock, flags);
        /*
         * Place entry at the _HEAD_ of the list, so that any cpu still
         * observing the entry in generic_smp_call_function_interrupt()
         * will not miss any other list entries:
         */
        list_add_rcu(&data->csd.list, &call_function.queue);
        raw_spin_unlock_irqrestore(&call_function.lock, flags);

        /*
         * Make the list addition visible before sending the ipi.
         * (IPIs must obey or appear to obey normal Linux cache
         * coherency rules -- see comment in generic_exec_single).
         */
        smp_mb();

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(data->cpumask);

        /* Optionally wait for the CPUs to complete */
        if (wait)
                csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
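
/*
 * Illustrative sketch, not part of the original file: a typical
 * smp_call_function_many() caller that asks every other online cpu to
 * clear some per-cpu state. Preemption must be disabled around the call,
 * and with wait=true the call only returns once every targeted cpu has
 * run the callback. All names here are hypothetical.
 */
static DEFINE_PER_CPU(int, example_pending);

static void example_drain(void *unused)
{
        /* runs on each targeted cpu, with interrupts disabled */
        __get_cpu_var(example_pending) = 0;
}

static void example_drain_others(void)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, example_drain, NULL, true);
        preempt_enable();
}
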
/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);

void ipi_call_lock(void)
{
        raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
        raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
        raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
        raw_spin_unlock_irq(&call_function.lock);
}

#endif /* USE_GENERIC_SMP_HELPERS */

/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
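
/*
 * Illustrative sketch, not part of the original file: on_each_cpu() runs
 * the callback on every online cpu, including the calling one, so it has
 * the same calling-context restrictions as smp_call_function(). A common
 * pattern is an empty callback with wait=1, used purely to make sure every
 * online cpu has taken (and returned from) an interrupt. Names below are
 * hypothetical.
 */
static void example_noop(void *unused)
{
        /* runs on every online cpu, including this one */
}

static void example_sync_all_cpus(void)
{
        /* wait=1: returns only after every cpu has run example_noop() */
        on_each_cpu(example_noop, NULL, 1);
}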