/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int __cpuinit
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			free_cpumask_var(cfd->cpumask_ipi);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		free_percpu(cfd->csd);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible to the IPI handler (which
	 * locks the list to pull the entry off it) before the IPI is sent;
	 * the normal cache coherency rules implied by the spinlocks
	 * guarantee this.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save the flags away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}
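
/*
 * Illustrative sketch, not part of the original file: architectures wire
 * their "call function single" IPI vector so that it ends up invoking the
 * handler above with interrupts disabled (the hardware/arch entry code
 * takes care of that), typically bracketed by irq_enter()/irq_exit().
 * The entry-point name below is hypothetical; real names are arch-specific.
 */
#if 0	/* example only */
void hypothetical_arch_call_single_ipi_entry(void)
{
	/* entered with interrupts disabled by the arch IPI vector */
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}
#endif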

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu)
		     && (irqs_disabled() || in_serving_irq())
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *data = &d;

		if (!wait)
			data = &__get_cpu_var(csd_data);

		csd_lock(data);

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data, wait);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
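
/*
 * Illustrative usage sketch, not part of the original file. Only
 * smp_call_function_single() itself is real; the "example_*" names are
 * hypothetical. The callback must be fast and non-blocking, since it runs
 * from IPI context with interrupts disabled; with wait == 1 the on-stack
 * argument stays valid until the remote call has finished.
 */
#if 0	/* example only */
static void example_remote_fn(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs on the target CPU, IRQs off */
}

static int example_call_cpu(int cpu)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* returns 0 on success, -ENXIO if @cpu is not online */
	return smp_call_function_single(cpu, example_remote_fn, &hits, 1);
}
#endif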

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
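
/*
 * Illustrative sketch, not part of the original file: run a callback on
 * whichever CPU in a caller-supplied mask is cheapest to reach (the calling
 * CPU first, then a CPU on the same node, then any online CPU in the mask).
 * The "example_" name is hypothetical; cpumask_of_node() is real.
 */
#if 0	/* example only */
static int example_run_on_node(int node, smp_call_func_t fn, void *arg)
{
	return smp_call_function_any(cpumask_of_node(node), fn, arg, 1);
}
#endif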

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}
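
/*
 * Illustrative sketch, not part of the original file: embed the
 * call_single_data in a longer-lived object so the call can be fired
 * without waiting. With wait == 0 the structure must stay valid (and
 * should start out zero-initialized, e.g. from kzalloc) until the callback
 * has run; csd_lock()/csd_lock_wait() serialize back-to-back uses.
 * "example_*" names are hypothetical.
 */
#if 0	/* example only */
struct example_trigger {
	struct call_single_data	csd;	/* assumed zero-initialized */
	int			payload;
};

static void example_trigger_fn(void *info)
{
	struct example_trigger *t = info;

	(void)t->payload;	/* consume the payload on the target CPU */
}

static void example_fire(struct example_trigger *t, int cpu)
{
	t->csd.func = example_trigger_fn;
	t->csd.info = t;
	__smp_call_function_single(cpu, &t->csd, 0);
}
#endif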

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu)
		     && (irqs_disabled() || in_serving_irq())
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignore this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);

	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(data->cpumask)))
		return;

	/*
	 * After we put an entry into the list, data->cpumask may be cleared
	 * again when another CPU sends another IPI for an SMP function call,
	 * so data->cpumask can end up empty. That is why the IPIs below are
	 * sent from the separate data->cpumask_ipi copy.
	 */
	cpumask_copy(data->cpumask_ipi, data->cpumask);

	for_each_cpu(cpu, data->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
		struct call_single_queue *dst =
					&per_cpu(call_single_queue, cpu);
		unsigned long flags;

		csd_lock(csd);
		csd->func = func;
		csd->info = info;

		raw_spin_lock_irqsave(&dst->lock, flags);
		list_add_tail(&csd->list, &dst->list);
		raw_spin_unlock_irqrestore(&dst->lock, flags);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, data->cpumask) {
			struct call_single_data *csd =
					per_cpu_ptr(data->csd, cpu);

			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
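
/*
 * Illustrative sketch, not part of the original file: invoke a callback on
 * every online CPU of one NUMA node except the calling CPU. Preemption must
 * be disabled across the call, per the comment above; get_cpu()/put_cpu()
 * take care of that here. "example_*" names are hypothetical.
 */
#if 0	/* example only */
static void example_drain_node(int node, smp_call_func_t fn, void *arg)
{
	get_cpu();		/* disables preemption */
	smp_call_function_many(cpumask_of_node(node), fn, arg, true);
	put_cpu();
}
#endif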

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
#endif /* USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
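
/*
 * Illustrative sketch, not part of the original file: on_each_cpu() also
 * runs @func on the calling CPU (with interrupts disabled), unlike
 * smp_call_function(). The "example_*" names are hypothetical.
 */
#if 0	/* example only */
static void example_sync_state(void *unused)
{
	/* fast, non-blocking work executed on every online CPU */
}

static int example_sync_all(void)
{
	return on_each_cpu(example_sync_state, NULL, 1);
}
#endif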

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
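
/*
 * Illustrative sketch, not part of the original file: unlike
 * smp_call_function_many(), the mask may include the calling CPU, which
 * then runs @func locally with interrupts disabled. The "example_" name
 * is hypothetical; mm_cpumask() is real.
 */
#if 0	/* example only */
static void example_notify_mm_users(struct mm_struct *mm, smp_call_func_t fn)
{
	/* IPI every CPU that has used @mm; may include the calling CPU */
	on_each_cpu_mask(mm_cpumask(mm), fn, mm, true);
}
#endif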

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
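
/*
 * Illustrative sketch, not part of the original file: IPI only the CPUs for
 * which the predicate reports pending work, e.g. CPUs whose per-cpu pending
 * counter is non-zero. The "example_*" names and the per-cpu variable are
 * hypothetical.
 */
#if 0	/* example only */
static DEFINE_PER_CPU(unsigned int, example_pending);

static bool example_cpu_has_work(int cpu, void *info)
{
	return per_cpu(example_pending, cpu) != 0;
}

static void example_do_work(void *info)
{
	__this_cpu_write(example_pending, 0);
}

static void example_flush_pending(void)
{
	on_each_cpu_cond(example_cpu_has_work, example_do_work, NULL,
			 true, GFP_KERNEL);
}
#endif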

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of the pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via the pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();

	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
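
/*
 * Illustrative sketch, not part of the original file: publish a new handler
 * pointer and then make sure no CPU can still be executing the old one.
 * "example_*" names are hypothetical.
 */
#if 0	/* example only */
static void (*example_idle_hook)(void);

static void example_set_idle_hook(void (*new_hook)(void))
{
	example_idle_hook = new_hook;	/* kick_all_cpus_sync() issues smp_mb() */
	kick_all_cpus_sync();		/* old hook is no longer running anywhere */
}
#endif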