/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @cb_state:	The state for a single callback (install/uninstall)
 * @cb:		Single callback function (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	struct task_struct	*thread;
	bool			should_run;
	enum cpuhp_state	cb_state;
	int			(*cb)(unsigned int cpu);
	int			result;
	struct completion	done;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
	bool		cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 *
 * Note: cpu_notifier_register_begin/done() are aliases of these two
 * functions, which is why the EXPORTs below use the notifier names.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
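
/*
 * Example (editor's sketch, not part of the original file): a typical
 * reader-side section. While the reference is held, no CPU can be plugged
 * or unplugged, so cpu_online_mask stays stable. The function name is
 * hypothetical.
 *
 *	static unsigned int example_count_online(void)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			n++;
 *		put_online_cpus();
 *		return n;
 *	}
 */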
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
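
/*
 * Example (editor's sketch, not part of the original file): pairing the
 * two calls around a section that must not race with hotplug. Unlike
 * get_online_cpus(), this does not block a hotplug writer; it makes
 * cpu_up()/cpu_down() fail with -EBUSY instead. The helper name is
 * hypothetical.
 *
 *	cpu_hotplug_disable();
 *	example_work_that_must_not_race_with_hotplug();
 *	cpu_hotplug_enable();
 */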
#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
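
/*
 * Example (editor's sketch, not part of the original file): registering a
 * notifier with the __ variant under the documented protection. The
 * callback and block names are hypothetical.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("example: cpu %u came online\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("example: cpu %u went down\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 *	cpu_notifier_register_begin();
 *	__register_cpu_notifier(&example_cpu_notifier);
 *	cpu_notifier_register_done();
 */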
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	wait_for_completion(&st->done);
	return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	ret = bringup_wait_for_ap(cpu);
	BUG_ON(!cpu_online(cpu));
	return ret;
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
			  struct cpuhp_step *steps)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				struct cpuhp_step *steps,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st, steps);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
			struct cpuhp_step *steps)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      struct cpuhp_step *steps,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = steps + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st, steps);
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	/* Single callback invocation for [un]install ? */
	if (st->cb) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
		}
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	st->result = ret;
	complete(&st->done);
}
/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
				    int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	st->cb_state = state;
	st->cb = cb;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->cb = NULL;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}

static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	/* Park the hotplug thread */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
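
/*
 * Example (editor's sketch, not part of the original file): the rough
 * shape of an architecture's secondary-CPU entry path around this call.
 * The function name is hypothetical and real arch code differs in the
 * details.
 *
 *	void example_secondary_start_kernel(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		... early per-cpu setup, interrupts still disabled ...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */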
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick the cpuhp thread and let it bring up
 * the rest.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	unsigned int cpu = smp_processor_id();

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;

	/* The cpu is marked online, set it active now */
	set_cpu_active(cpu, true);
	/* Unpark the stopper thread and the hotplug thread of this cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE)
		__cpuhp_kick_ap_work(st);
	else
		complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
	cpu_hotplug_done();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
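
/*
 * Example (editor's sketch, not part of the original file): taking a CPU
 * offline and back online from kernel code. The CPU number is arbitrary.
 *
 *	int err = cpu_down(2);
 *	if (err)
 *		pr_err("cpu 2 did not go down: %d\n", err);
 *	else
 *		err = cpu_up(2);
 */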
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup		= NULL,
		.teardown		= NULL,
	},
	[CPUHP_CREATE_THREADS]= {
		.name			= "threads:create",
		.startup		= smpboot_create_threads,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	/* Preparatory and dead notifiers */
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup		= notify_prepare,
		.teardown		= notify_dead,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup		= bringup_cpu,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	/* Handled on the control processor until the plugged processor
	 * manages this itself. */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup		= NULL,
		.teardown		= takedown_cpu,
		.cant_stop		= true,
	},
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name			= "notify:starting",
		.startup		= notify_starting,
		.teardown		= notify_dying,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot:threads",
		.startup		= smpboot_unpark_threads,
		.teardown		= smpboot_park_threads,
	},
	/*
	 * Online/down_prepare notifiers. Will be removed once the notifiers
	 * are converted to states.
	 */
	[CPUHP_AP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup		= notify_online,
		.teardown		= notify_down_prepare,
	},
	/* CPU is fully up and running. Sizes the array to cover the
	 * dynamic state range. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	return state > CPUHP_BRINGUP_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
static void cpuhp_store_callbacks(enum cpuhp_state state,
				  const char *name,
				  int (*startup)(unsigned int cpu),
				  int (*teardown)(unsigned int cpu))
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(state);
	sp->startup = startup;
	sp->teardown = teardown;
	sp->name = name;
	mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
			    int (*cb)(unsigned int), bool bringup)
{
	int ret;

	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, cb);
	else
		ret = cpuhp_invoke_callback(cpu, state, cb);
#else
	ret = cpuhp_invoke_callback(cpu, state, cb);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   int (*teardown)(unsigned int cpu))
{
	int cpu;

	if (!teardown)
		return;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
}
/*
 * Returns a free slot for dynamic assignment in the Online state range. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
		if (cpuhp_ap_states[i].name)
			continue;

		cpuhp_ap_states[i].name = "Reserved";
		mutex_unlock(&cpuhp_state_mutex);
		return i;
	}
	mutex_unlock(&cpuhp_state_mutex);
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
/**
 * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
 * @state:	The state to setup
 * @name:	Name of the step
 * @invoke:	If true, the startup function is invoked for cpus where
 *		cpu state >= @state
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu))
{
	int cpu, ret = 0;
	int dyn_state = 0;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();

	/* currently assignments for the ONLINE state are possible */
	if (state == CPUHP_AP_ONLINE_DYN) {
		dyn_state = 1;
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}

	cpuhp_store_callbacks(state, name, startup, teardown);

	if (!invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, startup, true);
		if (ret) {
			cpuhp_rollback_install(cpu, state, teardown);
			cpuhp_store_callbacks(state, NULL, NULL, NULL);
			goto out;
		}
	}
out:
	put_online_cpus();
	if (!ret && dyn_state)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
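
/*
 * Example (editor's sketch, not part of the original file): a driver
 * installing a dynamic online state. With CPUHP_AP_ONLINE_DYN the return
 * value is the reserved state, which must be kept for later removal. All
 * example_* names are hypothetical.
 *
 *	static enum cpuhp_state example_state;
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		pr_info("example: setting up cpu %u\n", cpu);
 *		return 0;
 *	}
 *
 *	static int example_offline(unsigned int cpu)
 *	{
 *		pr_info("example: tearing down cpu %u\n", cpu);
 *		return 0;
 *	}
 *
 *	ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *				  true, example_online, example_offline);
 *	if (ret < 0)
 *		return ret;
 *	example_state = ret;
 *
 * On module exit the state is dropped again:
 *
 *	__cpuhp_remove_state(example_state, true);
 */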
/**
 * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (!invoke || !teardown)
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else if (st->state > target)
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each value of nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
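
/*
 * Example (editor's illustration, not part of the original file): how
 * cpumask_of() uses this table. Row nr+1 has bit nr set in its first
 * word; for cpu >= BITS_PER_LONG, get_cpu_mask() picks row
 * 1 + cpu % BITS_PER_LONG and steps the pointer back cpu / BITS_PER_LONG
 * words, overlapping into the preceding rows (the empty row 0 makes that
 * safe).
 *
 *	const struct cpumask *mask = cpumask_of(3);
 *
 *	BUG_ON(!cpumask_test_cpu(3, mask));
 *	BUG_ON(cpumask_weight(mask) != 1);
 *
 * Callers thus get a constant address for a single-CPU mask without
 * allocating one.
 */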
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}